Compare commits
26 Commits
ui_improve
...
version3.4
| Author | SHA1 | Date | |
|---|---|---|---|
| eabd9d312f | |||
| 0da6fe78ac | |||
| be990380a0 | |||
| 9c0bc48420 | |||
| 37fc550652 | |||
| 2c1d6ac212 | |||
| 8c699c1b26 | |||
| c620fa9011 | |||
| f16fd60211 | |||
| 9674e59d26 | |||
| 643c5e125a | |||
| e5099e1daa | |||
| 3e621bbec1 | |||
| bb1d5a61c0 | |||
| fd3d0be2d8 | |||
| ae623258f3 | |||
| cda281f08b | |||
| 9f8e7a6efa | |||
| 57643dd2b6 | |||
| 6bc8a78cfe | |||
| d2700e97fb | |||
| c4dd81dc9a | |||
| e9b06d7cde | |||
| 6e6ea69611 | |||
| 16c17eb077 | |||
| 59877dd728 |
44
.gitignore
vendored
44
.gitignore
vendored
@ -2,14 +2,15 @@
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
plugins/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
@ -25,6 +26,7 @@ share/python-wheels/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
@ -33,6 +35,7 @@ MANIFEST
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
@ -46,64 +49,91 @@ coverage.xml
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
github
|
||||
.github
|
||||
TEMP
|
||||
TRASH
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
site/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.direnv/
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv*/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
.vscode
|
||||
.idea
|
||||
|
||||
history
|
||||
ssr_conf
|
||||
config_private.py
|
||||
@ -115,12 +145,8 @@ cradle*
|
||||
debug*
|
||||
private*
|
||||
crazy_functions/test_project/pdf_and_word
|
||||
crazy_fun
|
||||
ctions/test_samples
|
||||
crazy_functions/test_samples
|
||||
request_llm/jittorllms
|
||||
users_data/*
|
||||
request_llm/moss
|
||||
multi-language
|
||||
request_llm/moss
|
||||
media
|
||||
__test.py
|
||||
22
Dockerfile
22
Dockerfile
@ -1,28 +1,34 @@
|
||||
# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM
|
||||
# 如何构建: 先修改 `config.py`, 然后 docker build -t gpt-academic .
|
||||
# 如何运行: docker run --rm -it --net=host gpt-academic
|
||||
# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型或者latex运行依赖,请参考 docker-compose.yml
|
||||
# 如何构建: 先修改 `config.py`, 然后 `docker build -t gpt-academic . `
|
||||
# 如何运行(Linux下): `docker run --rm -it --net=host gpt-academic `
|
||||
# 如何运行(其他操作系统,选择任意一个固定端口50923): `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic `
|
||||
FROM python:3.11
|
||||
|
||||
|
||||
# 非必要步骤,更换pip源
|
||||
RUN echo '[global]' > /etc/pip.conf && \
|
||||
echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
|
||||
echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
|
||||
|
||||
|
||||
# 进入工作路径
|
||||
WORKDIR /gpt
|
||||
|
||||
|
||||
|
||||
|
||||
# 安装依赖
|
||||
# 安装大部分依赖,利用Docker缓存加速以后的构建
|
||||
COPY requirements.txt ./
|
||||
COPY ./docs/gradio-3.32.2-py3-none-any.whl ./docs/gradio-3.32.2-py3-none-any.whl
|
||||
RUN pip3 install -r requirements.txt
|
||||
# 装载项目文件
|
||||
|
||||
|
||||
# 装载项目文件,安装剩余依赖
|
||||
COPY . .
|
||||
RUN pip3 install -r requirements.txt
|
||||
|
||||
# 可选步骤,用于预热模块
|
||||
|
||||
# 非必要步骤,用于预热模块
|
||||
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
|
||||
|
||||
|
||||
# 启动
|
||||
CMD ["python3", "-u", "main.py"]
|
||||
|
||||
45
README.md
45
README.md
@ -1,24 +1,24 @@
|
||||
> **Note**
|
||||
>
|
||||
> 2023.5.27 对Gradio依赖进行了调整,Fork并解决了官方Gradio的若干Bugs。请及时**更新代码**并重新更新pip依赖。安装依赖时,请严格选择`requirements.txt`中**指定的版本**:
|
||||
>
|
||||
> `pip install -r requirements.txt`
|
||||
> 2023.7.5: 对Gradio依赖进行了调整。请及时**更新代码**。安装依赖时,请严格选择`requirements.txt`中**指定的版本**:
|
||||
>
|
||||
> `pip install -r requirements.txt`
|
||||
|
||||
# <img src="docs/logo.png" width="40" > GPT 学术优化 (GPT Academic)
|
||||
|
||||
**如果喜欢这个项目,请给它一个Star;如果你发明了更好用的快捷键或函数插件,欢迎发pull requests**
|
||||
# <div align=center><img src="docs/logo.png" width="40" > GPT 学术优化 (GPT Academic)</div>
|
||||
|
||||
**如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或函数插件,欢迎发pull requests!**
|
||||
|
||||
If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself.
|
||||
To translate this project to arbitary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> 1.请注意只有**红颜色**标识的函数插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR!
|
||||
> 1.请注意只有 **高亮(如红色)** 标识的函数插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。
|
||||
>
|
||||
> 2.本项目中每个文件的功能都在自译解[`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题汇总在[`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)当中。[安装方法](#installation)。
|
||||
>
|
||||
> 3.本项目兼容并鼓励尝试国产大语言模型chatglm和RWKV, 盘古等等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,api2d-key3"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。
|
||||
> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM和Moss等等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,api2d-key3"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。
|
||||
|
||||
|
||||
|
||||
@ -42,13 +42,13 @@ chat分析报告生成 | [函数插件] 运行后自动生成总结汇报
|
||||
[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [函数插件] PDF论文提取题目&摘要+翻译全文(多线程)
|
||||
[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [函数插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
|
||||
[谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [函数插件] 给定任意谷歌学术搜索页面URL,让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
|
||||
互联网信息聚合+GPT | [函数插件] 一键[让GPT先从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck),再回答问题,让信息永不过时
|
||||
⭐Arxiv论文精细翻译 | [函数插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),迄今为止最好的论文翻译工具⭐
|
||||
互联网信息聚合+GPT | [函数插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck)回答问题,让信息永不过时
|
||||
⭐Arxiv论文精细翻译 | [函数插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具
|
||||
公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮
|
||||
多线程函数插件支持 | 支持多线调用chatgpt,一键处理[海量文本](https://www.bilibili.com/video/BV1FT411H7c5/)或程序
|
||||
启动暗色gradio[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
|
||||
启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
|
||||
[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM](https://github.com/THUDM/ChatGLM-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧?
|
||||
更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama),[RWKV](https://github.com/BlinkDL/ChatRWKV)和[盘古α](https://openi.org.cn/pangu/)
|
||||
更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/)
|
||||
更多新功能展示(图像生成等) …… | 见本文档结尾处 ……
|
||||
|
||||
</div>
|
||||
@ -85,9 +85,8 @@ chat分析报告生成 | [函数插件] 运行后自动生成总结汇报
|
||||
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||
</div>
|
||||
|
||||
---
|
||||
# Installation
|
||||
## 安装-方法1:直接运行 (Windows, Linux or MacOS)
|
||||
### 安装方法I:直接运行 (Windows, Linux or MacOS)
|
||||
|
||||
1. 下载项目
|
||||
```sh
|
||||
@ -140,7 +139,7 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-
|
||||
python main.py
|
||||
```
|
||||
|
||||
## 安装-方法2:使用Docker
|
||||
### 安装方法II:使用Docker
|
||||
|
||||
1. 仅ChatGPT(推荐大多数人选择,等价于docker-compose方案1)
|
||||
|
||||
@ -171,7 +170,7 @@ docker-compose up
|
||||
```
|
||||
|
||||
|
||||
## 安装-方法3:其他部署姿势
|
||||
### 安装方法III:其他部署姿势
|
||||
1. 一键运行脚本。
|
||||
完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。
|
||||
脚本的贡献来源是[oobabooga](https://github.com/oobabooga/one-click-installers)。
|
||||
@ -194,11 +193,9 @@ docker-compose up
|
||||
7. 如何在二级网址(如`http://localhost/subpath`)下运行。
|
||||
请访问[FastAPI运行说明](docs/WithFastapi.md)
|
||||
|
||||
---
|
||||
# Advanced Usage
|
||||
## 自定义新的便捷按钮 / 自定义函数插件
|
||||
|
||||
1. 自定义新的便捷按钮(学术快捷键)
|
||||
# Advanced Usage
|
||||
### I:自定义新的便捷按钮(学术快捷键)
|
||||
任意文本编辑器打开`core_functional.py`,添加条目如下,然后重启程序即可。(如果按钮已经添加成功并可见,那么前缀、后缀都支持热修改,无需重启程序即可生效。)
|
||||
例如
|
||||
```
|
||||
@ -214,15 +211,15 @@ docker-compose up
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||
</div>
|
||||
|
||||
2. 自定义函数插件
|
||||
### II:自定义函数插件
|
||||
|
||||
编写强大的函数插件来执行任何你想得到的和想不到的任务。
|
||||
本项目的插件编写、调试难度很低,只要您具备一定的python基础知识,就可以仿照我们提供的模板实现自己的插件功能。
|
||||
详情请参考[函数插件指南](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。
|
||||
|
||||
---
|
||||
|
||||
# Latest Update
|
||||
## 新功能动态
|
||||
### I:新功能动态
|
||||
|
||||
1. 对话保存功能。在函数插件区调用 `保存当前的对话` 即可将当前对话保存为可读+可复原的html文件,
|
||||
另外在函数插件区(下拉菜单)调用 `载入对话历史存档` ,即可还原之前的会话。
|
||||
@ -283,7 +280,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
|
||||
|
||||
|
||||
|
||||
## 版本:
|
||||
### II:版本:
|
||||
- version 3.5(Todo): 使用自然语言调用本项目的所有函数插件(高优先级)
|
||||
- version 3.4: +arxiv论文翻译、latex论文批改功能
|
||||
- version 3.3: +互联网信息综合功能
|
||||
@ -305,7 +302,7 @@ gpt_academic开发者QQ群-2:610599535
|
||||
- 某些浏览器翻译插件干扰此软件前端的运行
|
||||
- 官方Gradio目前有很多兼容性Bug,请务必使用`requirement.txt`安装Gradio
|
||||
|
||||
## 参考与学习
|
||||
### III:参考与学习
|
||||
|
||||
```
|
||||
代码中参考了很多其他优秀项目中的设计,顺序不分先后:
|
||||
|
||||
486
__main__.py
486
__main__.py
@ -1,486 +0,0 @@
|
||||
import os
|
||||
import gradio as gr
|
||||
from request_llm.bridge_all import predict
|
||||
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_user_upload, \
|
||||
get_conf, ArgsGeneralWrapper, DummyWith
|
||||
|
||||
# 问询记录, python 版本建议3.9+(越新越好)
|
||||
import logging
|
||||
|
||||
# 一些普通功能模块
|
||||
from core_functional import get_core_functions
|
||||
|
||||
functional = get_core_functions()
|
||||
|
||||
# 高级函数插件
|
||||
from crazy_functional import get_crazy_functions
|
||||
|
||||
crazy_fns = get_crazy_functions()
|
||||
|
||||
# 处理markdown文本格式的转变
|
||||
gr.Chatbot.postprocess = format_io
|
||||
|
||||
# 做一些外观色彩上的调整
|
||||
from theme import adjust_theme, advanced_css, custom_css
|
||||
|
||||
set_theme = adjust_theme()
|
||||
|
||||
# 代理与自动更新
|
||||
from check_proxy import check_proxy, auto_update, warm_up_modules
|
||||
|
||||
import func_box
|
||||
|
||||
from check_proxy import get_current_version
|
||||
|
||||
os.makedirs("gpt_log", exist_ok=True)
|
||||
try:
|
||||
logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
|
||||
except:
|
||||
logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
|
||||
print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
|
||||
|
||||
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
|
||||
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, LAYOUT, API_KEY, AVAIL_LLM_MODELS, LOCAL_PORT= \
|
||||
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'LAYOUT',
|
||||
'API_KEY', 'AVAIL_LLM_MODELS', 'LOCAL_PORT')
|
||||
|
||||
proxy_info = check_proxy(proxies)
|
||||
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
||||
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
||||
if not AUTHENTICATION: AUTHENTICATION = None
|
||||
os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
||||
|
||||
|
||||
class ChatBotFrame:
|
||||
|
||||
def __init__(self):
|
||||
self.cancel_handles = []
|
||||
self.initial_prompt = "You will play a professional to answer me according to my needs."
|
||||
self.title_html = f"<h1 align=\"center\">Chatbot for KSO {get_current_version()}</h1>"
|
||||
self.description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
|
||||
|
||||
|
||||
class ChatBot(ChatBotFrame):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.__url = f'http://{func_box.ipaddr()}:{PORT}'
|
||||
# self.__gr_url = gr.State(self.__url)
|
||||
|
||||
def draw_title(self):
|
||||
# self.title = gr.HTML(self.title_html)
|
||||
self.cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL, 'local': self.__url})
|
||||
def draw_chatbot(self):
|
||||
self.chatbot = gr.Chatbot(elem_id='main_chatbot', label=f"当前模型:{LLM_MODEL}")
|
||||
self.chatbot.style()
|
||||
self.history = gr.State([])
|
||||
temp_draw = [gr.HTML() for i in range(7)]
|
||||
with gr.Box(elem_id='chat_box'):
|
||||
self.state_users = gr.HTML(value='', visible=False, elem_id='state_users')
|
||||
with gr.Row():
|
||||
self.sm_upload = gr.UploadButton(label='UPLOAD', file_count='multiple', elem_classes='sm_btn').style(size='sm', full_width=False)
|
||||
self.sm_code_block = gr.Button(value='CODE', elem_classes='sm_btn').style(size='sm', full_width=False)
|
||||
self.sm_upload_history = gr.Button("SPASE", variant="primary", elem_classes='sm_btn').style(size='sm', full_width=False)
|
||||
self.md_dropdown = gr.Dropdown(choices=AVAIL_LLM_MODELS, value=LLM_MODEL,
|
||||
show_label=False, interactive=True,
|
||||
elem_classes='sm_select', elem_id='change-font-size').style(container=False)
|
||||
gr.HTML(func_box.get_html("appearance_switcher.html").format(label=""), elem_id='user_input_tb', elem_classes="insert_block")
|
||||
|
||||
with gr.Row():
|
||||
self.txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_classes='chat_input').style(container=False)
|
||||
self.input_copy = gr.State('')
|
||||
self.submitBtn = gr.Button("", variant="primary", elem_classes='submit_btn').style(full_width=False)
|
||||
with gr.Row():
|
||||
self.status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行\n {proxy_info}", elem_id='debug_mes')
|
||||
|
||||
def signals_sm_btn(self):
|
||||
self.sm_upload.upload(on_file_uploaded, [self.sm_upload, self.chatbot, self.txt], [self.chatbot, self.txt]).then(
|
||||
fn=lambda: [gr.Tabs.update(selected='plug_tab'), gr.Column.update(visible=False)], inputs=None, outputs=[self.tabs_inputs, self.examples_column]
|
||||
)
|
||||
self.sm_code_block.click(fn=lambda x: x+'```\n\n```', inputs=[self.txt], outputs=[self.txt])
|
||||
self.sm_upload_history.click(get_user_upload, [self.chatbot], outputs=[self.chatbot]).then(fn=lambda: gr.Column.update(visible=False), inputs=None, outputs=self.examples_column)
|
||||
# self.sm_select_font.select(fn=lambda x: gr.HTML.update(value=f"{x}px"), inputs=[self.sm_select_font], outputs=[self.state_users])
|
||||
|
||||
def draw_examples(self):
|
||||
with gr.Column(elem_id='examples_col') as self.examples_column:
|
||||
gr.Markdown('# Get Started Quickly')
|
||||
with gr.Row():
|
||||
hide_components = gr.Textbox(visible=False)
|
||||
gr.Button.update = func_box.update_btn
|
||||
self.example = [['今天伦敦天气怎么样?', '对2021年以后的世界和事件了解有限', self.submitBtn.update(elem_id='highlight_update')],
|
||||
['今夕何夕,明月何月?', '偶尔会产生不正确的信息', self.submitBtn.update(elem_id='highlight_update')],
|
||||
['怎么才能把学校给炸了?', '经过训练,会拒绝不适当的请求', self.submitBtn.update(elem_id='highlight_update')]]
|
||||
self.example_inputs = [self.txt, hide_components, self.submitBtn]
|
||||
self.guidance_example = gr.Examples(examples=self.example, inputs=self.example_inputs, label='基础对话')
|
||||
self.guidance_plugins = gr.Dataset(components=[gr.HTML(visible=False)], samples=[['...'] for i in range(4)], label='高级功能', type='index')
|
||||
self.guidance_plugins_state = gr.State()
|
||||
self.guidance_news = gr.Examples(examples=func_box.git_log_list(), inputs=[hide_components, hide_components], label='News')
|
||||
|
||||
def plug_update(index, date_set):
|
||||
variant = crazy_fns[date_set[index]]["Color"] if "Color" in crazy_fns[date_set[index]] else "secondary"
|
||||
ret = {self.switchy_bt: self.switchy_bt.update(value=date_set[index], variant=variant, elem_id='highlight_update'),
|
||||
self.tabs_inputs: gr.Tabs.update(selected='plug_tab'),
|
||||
self.area_crazy_fn: self.area_crazy_fn.update(open=True)}
|
||||
fns_value = func_box.txt_converter_json(str(crazy_fns[date_set[index]].get('Parameters', '')))
|
||||
fns_lable = f"插件[{date_set[index]}]的高级参数说明:\n" + crazy_fns[date_set[index]].get("ArgsReminder", f"没有提供高级参数功能说明")
|
||||
temp_dict = dict(visible=True, interactive=True, value=str(fns_value), label=fns_lable)
|
||||
# 是否唤起高级插件参数区
|
||||
if crazy_fns[date_set[index]].get("AdvancedArgs", False):
|
||||
ret.update({self.plugin_advanced_arg: gr.update(**temp_dict)})
|
||||
ret.update({self.area_crazy_fn: self.area_crazy_fn.update(open=False)})
|
||||
else:
|
||||
ret.update({self.plugin_advanced_arg: gr.update(visible=False, label=f"插件[{date_set[index]}]不需要高级参数。")})
|
||||
return ret
|
||||
|
||||
self.guidance_plugins.select(fn=plug_update, inputs=[self.guidance_plugins, self.guidance_plugins_state],
|
||||
outputs=[self.switchy_bt, self.plugin_advanced_arg, self.tabs_inputs,
|
||||
self.area_crazy_fn])
|
||||
|
||||
def __clear_input(self, inputs):
|
||||
return '', inputs, self.examples_column.update(visible=False)
|
||||
|
||||
def draw_prompt(self):
|
||||
with gr.Row():
|
||||
self.pro_search_txt = gr.Textbox(show_label=False, placeholder="Enter the prompt you want.").style(
|
||||
container=False)
|
||||
self.pro_entry_btn = gr.Button("搜索", variant="primary").style(full_width=False, size="sm")
|
||||
with gr.Row():
|
||||
with gr.Accordion(label='Prompt usage frequency'):
|
||||
self.pro_prompt_list = gr.Dataset(components=[gr.HTML(visible=False)], samples_per_page=10,
|
||||
label='Results',
|
||||
samples=[[". . ."] for i in range(20)], type='index')
|
||||
self.pro_prompt_state = gr.State(self.pro_prompt_list)
|
||||
|
||||
def draw_temp_edit(self):
|
||||
with gr.Box():
|
||||
with gr.Row():
|
||||
with gr.Column(scale=100):
|
||||
self.pro_results = gr.Chatbot(label='Prompt and result', elem_id='prompt_result').style()
|
||||
with gr.Column(scale=16):
|
||||
Tips = "用 BORF 分析法设计chat GPT prompt:\n" \
|
||||
"1、阐述背景 B(Background): 说明背景,为chatGPT提供充足的信息\n" \
|
||||
"2、定义目标 O(Objectives):“我们希望实现什么”\n" \
|
||||
"3、定义关键结果 R(key Result):“我要什么具体效果”\n" \
|
||||
"4、试验并调整,改进 E(Evolve):三种改进方法自由组合\n" \
|
||||
"\t 改进输入:从答案的不足之处着手改进背景B,目标O与关键结果R\n" \
|
||||
"\t 改进答案:在后续对话中指正chatGPT答案缺点\n" \
|
||||
"\t 重新生成:尝试在prompt不变的情况下多次生成结果,优中选优\n" \
|
||||
"\t 熟练使用占位符{{{v}}}: 当Prompt存在占位符,则优先将{{{v}}}替换为预期文本"
|
||||
self.pro_edit_txt = gr.Textbox(show_label=False, info='Prompt编辑区', lines=14,
|
||||
placeholder=Tips).style(container=False)
|
||||
with gr.Row():
|
||||
self.pro_name_txt = gr.Textbox(show_label=False, placeholder='是否全复用prompt / prompt功能名', ).style(
|
||||
container=False)
|
||||
self.pro_new_btn = gr.Button("保存Prompt", variant="primary").style(size='sm').style()
|
||||
with gr.Row(elem_id='sm_btn'):
|
||||
self.pro_reuse_btn = gr.Button("复用Result", variant="secondary").style(size='sm').style(full_width=False)
|
||||
self.pro_clear_btn = gr.Button("重置Result", variant="stop").style(size='sm').style(full_width=False)
|
||||
|
||||
|
||||
def signals_prompt_edit(self):
|
||||
self.pro_clear_btn.click(fn=lambda: [], inputs=None, outputs=self.pro_results)
|
||||
self.prompt_tab.select(fn=func_box.draw_results,
|
||||
inputs=[self.pro_search_txt, self.pro_prompt_state, self.pro_tf_slider,
|
||||
self.pro_private_check],
|
||||
outputs=[self.pro_prompt_list, self.pro_prompt_state])
|
||||
self.pro_search_txt.submit(fn=func_box.draw_results,
|
||||
inputs=[self.pro_search_txt, self.pro_prompt_state, self.pro_tf_slider,
|
||||
self.pro_private_check],
|
||||
outputs=[self.pro_prompt_list, self.pro_prompt_state])
|
||||
self.pro_entry_btn.click(fn=func_box.draw_results,
|
||||
inputs=[self.pro_search_txt, self.pro_prompt_state, self.pro_tf_slider,
|
||||
self.pro_private_check],
|
||||
outputs=[self.pro_prompt_list, self.pro_prompt_state])
|
||||
self.pro_prompt_list.click(fn=func_box.show_prompt_result,
|
||||
inputs=[self.pro_prompt_list, self.pro_prompt_state, self.pro_results, self.pro_edit_txt, self.pro_name_txt],
|
||||
outputs=[self.pro_results, self.pro_edit_txt, self.pro_name_txt])
|
||||
self.pro_new_btn.click(fn=func_box.prompt_save,
|
||||
inputs=[self.pro_edit_txt, self.pro_name_txt, self.pro_fp_state],
|
||||
outputs=[self.pro_edit_txt, self.pro_name_txt, self.pro_private_check,
|
||||
self.pro_func_prompt, self.pro_fp_state, self.tabs_chatbot])
|
||||
self.pro_reuse_btn.click(
|
||||
fn=func_box.reuse_chat,
|
||||
inputs=[self.pro_results, self.chatbot, self.history, self.pro_name_txt, self.txt],
|
||||
outputs=[self.chatbot, self.history, self.txt, self.tabs_chatbot, self.pro_name_txt, self.examples_column]
|
||||
)
|
||||
|
||||
def draw_function_chat(self):
|
||||
prompt_list, devs_document = get_conf('prompt_list', 'devs_document')
|
||||
with gr.TabItem('Function', id='func_tab'):
|
||||
with gr.Accordion("基础功能区", open=False) as self.area_basic_fn:
|
||||
with gr.Row():
|
||||
for k in functional:
|
||||
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
|
||||
functional[k]["Button"] = gr.Button(k, variant=variant)
|
||||
with gr.Accordion("上传你的Prompt", open=False) as self.area_basic_fn:
|
||||
jump_link = f'<a href="{devs_document}" target="_blank">Developer Documentation</a>'
|
||||
self.pro_devs_link = gr.HTML(jump_link)
|
||||
self.pro_upload_btn = gr.File(file_count='single', file_types=['.yaml', '.json'],
|
||||
label=f'上传你的Prompt文件, 编写格式请遵循上述开发者文档', )
|
||||
self.pro_private_check = gr.CheckboxGroup(choices=prompt_list['key'], value=prompt_list['value'],
|
||||
label='选择展示Prompt')
|
||||
self.pro_func_prompt = gr.Dataset(components=[gr.HTML()], label="Prompt List", visible=False,
|
||||
samples=[['...', ""] for i in range(20)], type='index',
|
||||
samples_per_page=10)
|
||||
self.pro_fp_state = gr.State(self.pro_func_prompt)
|
||||
|
||||
def signals_prompt_func(self):
|
||||
self.pro_private_check.select(fn=func_box.prompt_reduce,
|
||||
inputs=[self.pro_private_check, self.pro_fp_state],
|
||||
outputs=[self.pro_func_prompt, self.pro_fp_state, self.pro_private_check])
|
||||
self.tabs_code = gr.State(0)
|
||||
self.pro_func_prompt.select(fn=func_box.prompt_input,
|
||||
inputs=[self.txt, self.pro_edit_txt, self.pro_name_txt, self.pro_func_prompt, self.pro_fp_state, self.tabs_code],
|
||||
outputs=[self.txt, self.pro_edit_txt, self.pro_name_txt])
|
||||
self.pro_upload_btn.upload(fn=func_box.prompt_upload_refresh,
|
||||
inputs=[self.pro_upload_btn, self.pro_prompt_state],
|
||||
outputs=[self.pro_func_prompt, self.pro_prompt_state, self.pro_private_check])
|
||||
self.chat_tab.select(fn=lambda: 0, inputs=None, outputs=self.tabs_code)
|
||||
self.prompt_tab.select(fn=lambda: 1, inputs=None, outputs=self.tabs_code)
|
||||
|
||||
def draw_public_chat(self):
|
||||
with gr.TabItem('Plugins', id='plug_tab'):
|
||||
with gr.Accordion("上传本地文件可供高亮函数插件调用", open=False) as self.area_file_up:
|
||||
self.file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)",
|
||||
file_count="multiple")
|
||||
self.file_upload.style()
|
||||
with gr.Accordion("函数插件区", open=True) as self.area_crazy_fn:
|
||||
with gr.Row():
|
||||
for k in crazy_fns:
|
||||
if not crazy_fns[k].get("AsButton", True): continue
|
||||
self.variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
|
||||
crazy_fns[k]["Button"] = gr.Button(k, variant=self.variant)
|
||||
crazy_fns[k]["Button"].style(size="sm")
|
||||
with gr.Accordion("更多函数插件/高级用法", open=True, ):
|
||||
dropdown_fn_list = []
|
||||
for k in crazy_fns.keys():
|
||||
if not crazy_fns[k].get("AsButton", True):
|
||||
dropdown_fn_list.append(k)
|
||||
elif crazy_fns[k].get('AdvancedArgs', False):
|
||||
dropdown_fn_list.append(k)
|
||||
self.dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", show_label=False, label="").style(
|
||||
container=False)
|
||||
self.plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
|
||||
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
|
||||
self.switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
|
||||
|
||||
def draw_setting_chat(self):
|
||||
switch_model = get_conf('switch_model')[0]
|
||||
with gr.TabItem('Settings', id='sett_tab'):
|
||||
self.top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True,
|
||||
label="Top-p (nucleus sampling)", ).style(container=False)
|
||||
self.temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True,
|
||||
label="Temperature", ).style(container=False)
|
||||
self.max_length_sl = gr.Slider(minimum=256, maximum=4096, value=4096, step=1, interactive=True,
|
||||
label="MaxLength", ).style(container=False)
|
||||
self.pro_tf_slider = gr.Slider(minimum=0.01, maximum=1.0, value=0.70, step=0.01, interactive=True,
|
||||
label="Term Frequency系数").style(container=False)
|
||||
self.models_box = gr.CheckboxGroup(choices=switch_model['key'], value=switch_model['value'], label="对话模式")
|
||||
self.system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt",
|
||||
label="System prompt", value=self.initial_prompt)
|
||||
# temp = gr.Markdown(self.description)
|
||||
|
||||
def draw_goals_auto(self):
|
||||
with gr.Row():
|
||||
self.ai_name = gr.Textbox(show_label=False, placeholder="给Ai一个名字").style(container=False)
|
||||
with gr.Row():
|
||||
self.ai_role = gr.Textbox(lines=5, show_label=False, placeholder="请输入你的需求").style(
|
||||
container=False)
|
||||
with gr.Row():
|
||||
self.ai_goal_list = gr.Dataframe(headers=['Goals'], interactive=True, row_count=4,
|
||||
col_count=(1, 'fixed'), type='array')
|
||||
with gr.Row():
|
||||
self.ai_budget = gr.Number(show_label=False, value=0.0,
|
||||
info="关于本次项目的预算,超过预算自动停止,默认无限").style(container=False)
|
||||
|
||||
|
||||
def draw_next_auto(self):
|
||||
with gr.Row():
|
||||
self.text_continue = gr.Textbox(visible=False, show_label=False,
|
||||
placeholder="请根据提示输入执行命令").style(container=False)
|
||||
with gr.Row():
|
||||
self.submit_start = gr.Button("Start", variant='primary')
|
||||
self.submit_next = gr.Button("Next", visible=False, variant='primary')
|
||||
self.submit_stop = gr.Button("Stop", variant="stop")
|
||||
self.agent_obj = gr.State({'obj': None, "start": self.submit_start,
|
||||
"next": self.submit_next, "text": self.text_continue})
|
||||
|
||||
|
||||
def signals_input_setting(self):
|
||||
# 注册input
|
||||
self.input_combo = [self.cookies, self.max_length_sl, self.md_dropdown,
|
||||
self.input_copy, self.top_p, self.temperature, self.chatbot, self.history,
|
||||
self.system_prompt, self.models_box, self.plugin_advanced_arg]
|
||||
self.output_combo = [self.cookies, self.chatbot, self.history, self.status]
|
||||
self.predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=self.input_combo, outputs=self.output_combo)
|
||||
self.clear_agrs = dict(fn=self.__clear_input, inputs=[self.txt], outputs=[self.txt, self.input_copy,
|
||||
self.examples_column])
|
||||
# 提交按钮、重置按钮
|
||||
self.cancel_handles.append(self.txt.submit(**self.clear_agrs).then(**self.predict_args))
|
||||
self.cancel_handles.append(self.submitBtn.click(**self.clear_agrs).then(**self.predict_args))
|
||||
# self.cpopyBtn.click(fn=func_box.copy_result, inputs=[self.history], outputs=[self.status])
|
||||
self.resetBtn.click(lambda: ([], [], "已重置"), None, [self.chatbot, self.history, self.status])
|
||||
|
||||
def signals_function(self):
|
||||
# 基础功能区的回调函数注册
|
||||
for k in functional:
|
||||
self.click_handle = functional[k]["Button"].click(**self.clear_agrs).then(fn=ArgsGeneralWrapper(predict),
|
||||
inputs=[*self.input_combo, gr.State(True), gr.State(k)],
|
||||
outputs=self.output_combo)
|
||||
self.cancel_handles.append(self.click_handle)
|
||||
|
||||
def signals_public(self):
|
||||
# 文件上传区,接收文件后与chatbot的互动
|
||||
self.file_upload.upload(on_file_uploaded, [self.file_upload, self.chatbot, self.txt], [self.chatbot, self.txt])
|
||||
# 函数插件-固定按钮区
|
||||
for k in crazy_fns:
|
||||
if not crazy_fns[k].get("AsButton", True): continue
|
||||
self.click_handle = crazy_fns[k]["Button"].click(**self.clear_agrs).then(
|
||||
ArgsGeneralWrapper(crazy_fns[k]["Function"]),
|
||||
[*self.input_combo, gr.State(PORT), gr.State(crazy_fns[k].get('Parameters', False))],
|
||||
self.output_combo)
|
||||
self.click_handle.then(on_report_generated, [self.cookies, self.file_upload, self.chatbot],
|
||||
[self.cookies, self.file_upload, self.chatbot])
|
||||
# self.click_handle.then(fn=lambda x: '', inputs=[], outputs=self.txt)
|
||||
self.cancel_handles.append(self.click_handle)
|
||||
|
||||
# 函数插件-下拉菜单与随变按钮的互动
|
||||
def on_dropdown_changed(k):
|
||||
# 按钮颜色随变
|
||||
variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
|
||||
ret = {self.switchy_bt: self.switchy_bt.update(value=k, variant=variant)}
|
||||
# 参数取随变
|
||||
fns_value = func_box.txt_converter_json(str(crazy_fns[k].get('Parameters', '')))
|
||||
fns_lable = f"插件[{k}]的高级参数说明:\n" + crazy_fns[k].get("ArgsReminder", f"没有提供高级参数功能说明")
|
||||
temp_dict = dict(visible=True, interactive=True, value=str(fns_value), label=fns_lable)
|
||||
# 是否唤起高级插件参数区
|
||||
if crazy_fns[k].get("AdvancedArgs", False):
|
||||
ret.update({self.plugin_advanced_arg: gr.update(**temp_dict)})
|
||||
else:
|
||||
ret.update({self.plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")})
|
||||
return ret
|
||||
|
||||
self.dropdown.select(on_dropdown_changed, [self.dropdown], [self.switchy_bt, self.plugin_advanced_arg])
|
||||
|
||||
# 随变按钮的回调函数注册
|
||||
def route(k, ipaddr: gr.Request, *args, **kwargs):
|
||||
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
|
||||
append = list(args)
|
||||
append[-2] = func_box.txt_converter_json(append[-2])
|
||||
append.insert(-1, ipaddr)
|
||||
args = tuple(append)
|
||||
yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
|
||||
|
||||
self.click_handle = self.switchy_bt.click(**self.clear_agrs).then(route, [self.switchy_bt, *self.input_combo, gr.State(PORT)], self.output_combo)
|
||||
self.click_handle.then(on_report_generated, [self.cookies, self.file_upload, self.chatbot],
|
||||
[self.cookies, self.file_upload, self.chatbot])
|
||||
self.cancel_handles.append(self.click_handle)
|
||||
# 终止按钮的回调函数注册
|
||||
self.stopBtn.click(fn=None, inputs=None, outputs=None, cancels=self.cancel_handles)
|
||||
|
||||
def on_md_dropdown_changed(k):
|
||||
return {self.chatbot: gr.update(label="当前模型:" + k)}
|
||||
|
||||
self.md_dropdown.select(on_md_dropdown_changed, [self.md_dropdown], [self.chatbot])
|
||||
|
||||
def signals_auto_input(self):
|
||||
self.auto_input_combo = [self.ai_name, self.ai_role, self.ai_goal_list, self.ai_budget,
|
||||
self.cookies, self.chatbot, self.history,
|
||||
self.agent_obj]
|
||||
self.auto_output_combo = [self.cookies, self.chatbot, self.history, self.status,
|
||||
self.agent_obj, self.submit_start, self.submit_next, self.text_continue]
|
||||
|
||||
|
||||
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
||||
def auto_opentab_delay(self, is_open=False):
|
||||
import threading, webbrowser, time
|
||||
|
||||
print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
|
||||
print(f"\t(亮色主题): http://localhost:{PORT}")
|
||||
print(f"\t(暗色主题): {self.__url}/?__theme=dark")
|
||||
if is_open:
|
||||
def open():
|
||||
time.sleep(2) # 打开浏览器
|
||||
webbrowser.open_new_tab(f"http://localhost:{PORT}/?__theme=dark")
|
||||
|
||||
threading.Thread(target=open, name="open-browser", daemon=True).start()
|
||||
threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
|
||||
# threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
|
||||
|
||||
|
||||
def main(self):
|
||||
|
||||
with gr.Blocks(title="Chatbot for KSO ", theme=set_theme, analytics_enabled=False, css=custom_css) as self.demo:
|
||||
# 绘制页面title
|
||||
self.draw_title()
|
||||
# 绘制一个ROW,row会让底下的元素自动排成一行
|
||||
with gr.Row().style(justify='between'):
|
||||
# 绘制列1
|
||||
with gr.Column(scale=44):
|
||||
with gr.Tabs() as self.tabs_copilot:
|
||||
# 绘制对话模组
|
||||
with gr.TabItem('Chat-Copilot'):
|
||||
with gr.Row():
|
||||
# self.cpopyBtn = gr.Button("复制回答", variant="secondary").style(size="sm")
|
||||
self.resetBtn = gr.Button("新建对话", variant="primary", elem_id='empty_btn').style(
|
||||
size="sm")
|
||||
self.stopBtn = gr.Button("中止对话", variant="stop").style(size="sm")
|
||||
with gr.Tabs() as self.tabs_inputs:
|
||||
self.draw_function_chat()
|
||||
self.draw_public_chat()
|
||||
self.draw_setting_chat()
|
||||
|
||||
# 绘制autogpt模组
|
||||
with gr.TabItem('Auto-GPT'):
|
||||
self.draw_next_auto()
|
||||
self.draw_goals_auto()
|
||||
# 绘制列2
|
||||
with gr.Column(scale=100):
|
||||
with gr.Tabs() as self.tabs_chatbot:
|
||||
with gr.TabItem('Chatbot', id='chatbot') as self.chat_tab:
|
||||
# self.draw_chatbot()
|
||||
pass
|
||||
with gr.TabItem('Prompt检索/编辑') as self.prompt_tab:
|
||||
self.draw_prompt()
|
||||
|
||||
with self.chat_tab: # 使用 gr.State()对组件进行拷贝时,如果之前绘制了Markdown格式,会导致启动崩溃,所以将 markdown相关绘制放在最后
|
||||
self.draw_chatbot()
|
||||
self.draw_examples()
|
||||
with self.prompt_tab:
|
||||
self.draw_temp_edit()
|
||||
# 函数注册,需要在Blocks下进行
|
||||
self.signals_sm_btn()
|
||||
self.signals_input_setting()
|
||||
self.signals_function()
|
||||
self.signals_prompt_func()
|
||||
self.signals_public()
|
||||
self.signals_prompt_edit()
|
||||
# self.signals_auto_input()
|
||||
adv_plugins = gr.State([i for i in crazy_fns])
|
||||
self.demo.load(fn=func_box.refresh_load_data, postprocess=False,
|
||||
inputs=[self.chatbot, self.history, self.pro_fp_state, adv_plugins],
|
||||
outputs=[self.pro_func_prompt, self.pro_fp_state, self.chatbot, self.history, self.guidance_plugins, self.guidance_plugins_state])
|
||||
|
||||
# Start
|
||||
self.auto_opentab_delay()
|
||||
self.demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION,
|
||||
blocked_paths=["config.py", "config_private.py", "docker-compose.yml", "Dockerfile"])
|
||||
|
||||
|
||||
def check_proxy_free():
|
||||
proxy_state = func_box.Shell(f'lsof -i :{PORT}').read()[1].splitlines()
|
||||
if proxy_state != ["", ""]:
|
||||
print('Kill Old Server')
|
||||
for i in proxy_state[1:]:
|
||||
func_box.Shell(f'kill -9 {i.split()[1]}').read()
|
||||
import time
|
||||
time.sleep(5)
|
||||
|
||||
if __name__ == '__main__':
|
||||
# PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
||||
PORT = LOCAL_PORT if WEB_PORT <= 0 else WEB_PORT
|
||||
check_proxy_free()
|
||||
ChatBot().main()
|
||||
gr.close_all()
|
||||
check_proxy_free()
|
||||
|
||||
@ -11,7 +11,9 @@ def check_proxy(proxies):
|
||||
country = data['country_name']
|
||||
result = f"代理配置 {proxies_https}, 代理所在地:{country}"
|
||||
elif 'error' in data:
|
||||
result = f"代理配置 {proxies_https}, 代理所在地:未知"
|
||||
result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限"
|
||||
else:
|
||||
result = f"代理配置 {proxies_https}, 代理数据解析失败:{data}"
|
||||
print(result)
|
||||
return result
|
||||
except:
|
||||
|
||||
112
config.py
112
config.py
@ -1,36 +1,27 @@
|
||||
# [step 1]>> 例如: API_KEY = "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" (此key无效)
|
||||
"""
|
||||
以下所有配置也都支持利用环境变量覆写,环境变量配置格式见docker-compose.yml。
|
||||
读取优先级:环境变量 > config_private.py > config.py
|
||||
--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
|
||||
All the following configurations also support using environment variables to override,
|
||||
and the environment variable configuration format can be seen in docker-compose.yml.
|
||||
Configuration reading priority: environment variable > config_private.py > config.py
|
||||
"""
|
||||
|
||||
# [step 1]>> API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项
|
||||
API_KEY = "sk-此处填API密钥" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"
|
||||
|
||||
|
||||
|
||||
prompt_list = {'key': ['所有人', '个人'], 'value': []}
|
||||
|
||||
switch_model = {'key': ['input加密', '隐私模式'], 'value': ['input加密']}
|
||||
|
||||
private_key = 'uhA51pHtjisfjij'
|
||||
|
||||
import func_box
|
||||
import os
|
||||
devs_document = "/file="+os.path.join(func_box.base_path, 'README.md')
|
||||
|
||||
#增加关于AZURE的配置信息, 可以在AZURE网页中找到
|
||||
AZURE_ENDPOINT = "https://你的api名称.openai.azure.com/"
|
||||
AZURE_API_KEY = "填入azure openai api的密钥"
|
||||
AZURE_API_VERSION = "填入api版本"
|
||||
AZURE_ENGINE = "填入ENGINE"
|
||||
|
||||
# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改
|
||||
USE_PROXY = False
|
||||
|
||||
LOCAL_PORT = 7891
|
||||
if USE_PROXY:
|
||||
# 填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改
|
||||
# 例如 "socks5h://localhost:11284"
|
||||
# [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http
|
||||
# [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上)
|
||||
# [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
|
||||
|
||||
# 代理网络的地址,打开你的*学*网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
|
||||
"""
|
||||
填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改
|
||||
<配置教程&视频教程> https://github.com/binary-husky/gpt_academic/issues/1>
|
||||
[协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http
|
||||
[地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上)
|
||||
[端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
|
||||
"""
|
||||
# 代理网络的地址,打开你的*学*网软件查看代理的协议(socks5h / http)、地址(localhost)和端口(11284)
|
||||
proxies = {
|
||||
# [协议]:// [地址] :[端口]
|
||||
"http": "socks5h://localhost:11284", # 再例如 "http": "http://127.0.0.1:7890",
|
||||
@ -39,78 +30,78 @@ if USE_PROXY:
|
||||
else:
|
||||
proxies = None
|
||||
|
||||
# [step 3]>> 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次
|
||||
# 一言以蔽之:免费用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview
|
||||
# ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------
|
||||
|
||||
# 重新URL重新定向,实现更换API_URL的作用(常规情况下,不要修改!! 高危设置!通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
|
||||
# 格式 API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
|
||||
# 例如 API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions":"https://reverse-proxy-url/v1/chat/completions"}
|
||||
API_URL_REDIRECT = {}
|
||||
|
||||
|
||||
# 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次
|
||||
# 一言以蔽之:免费(5刀)用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview
|
||||
DEFAULT_WORKER_NUM = 3
|
||||
|
||||
|
||||
# [step 3]>> 以下配置可以优化体验,但大部分场合下并不需要修改 # 废弃了,移步到theme.py 的 #main_chatbot中修改
|
||||
# 对话窗的高度
|
||||
CHATBOT_HEIGHT = 1115
|
||||
|
||||
# 主题
|
||||
THEME = "Default"
|
||||
|
||||
# 代码高亮
|
||||
CODE_HIGHLIGHT = True
|
||||
|
||||
|
||||
# 窗口布局
|
||||
LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
|
||||
DARK_MODE = True # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
|
||||
LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
|
||||
DARK_MODE = True # 暗色模式 / 亮色模式
|
||||
|
||||
|
||||
# 发送请求到OpenAI后,等待多久判定为超时
|
||||
TIMEOUT_SECONDS = 30
|
||||
|
||||
|
||||
# 网页的端口, -1代表随机端口
|
||||
WEB_PORT = -1
|
||||
|
||||
|
||||
# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
|
||||
MAX_RETRY = 2
|
||||
|
||||
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 同时它必须被包含在AVAIL_LLM_MODELS切换列表中 )
|
||||
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
|
||||
|
||||
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt35", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "newbing-free", "stack-claude"]
|
||||
# P.S. 其他可用的模型还包括 ["newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
|
||||
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
|
||||
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
|
||||
# P.S. 其他可用的模型还包括 ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||
|
||||
|
||||
# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
|
||||
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
|
||||
|
||||
# OpenAI的API_URL
|
||||
API_URL = "https://api.openai.com/v1/chat/completions"
|
||||
PROXY_API_URL = '' # 你的网关应用
|
||||
|
||||
# 设置gradio的并行线程数(不需要修改)
|
||||
CONCURRENT_COUNT = 100
|
||||
|
||||
|
||||
# 是否在提交时自动清空输入框
|
||||
AUTO_CLEAR_TXT = False
|
||||
|
||||
|
||||
# 加一个live2d装饰
|
||||
ADD_WAIFU = False
|
||||
|
||||
# 川虎JS
|
||||
ADD_CHUANHU = True
|
||||
|
||||
# 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
|
||||
# [("username", "password"), ("username2", "password2"), ...]
|
||||
AUTHENTICATION = []
|
||||
|
||||
# 重新URL重新定向,实现更换API_URL的作用(常规情况下,不要修改!!)
|
||||
# (高危设置!通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
|
||||
# 格式 {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
|
||||
# 例如 API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://ai.open.com/api/conversation"}
|
||||
API_URL_REDIRECT = {}
|
||||
|
||||
# 如果需要在二级路径下运行(常规情况下,不要修改!!)(需要配合修改main.py才能生效!)
|
||||
CUSTOM_PATH = "/"
|
||||
|
||||
# 如果需要使用newbing,把newbing的长长的cookie放到这里
|
||||
NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
|
||||
# 从现在起,如果您调用"newbing-free"模型,则无需填写NEWBING_COOKIES
|
||||
NEWBING_COOKIES = """
|
||||
your bing cookies here
|
||||
"""
|
||||
|
||||
# 极少数情况下,openai的官方KEY需要伴随组织编码(格式如org-xxxxxxxxxxxxxxxxxxxxxxxx)使用
|
||||
API_ORG = ""
|
||||
|
||||
|
||||
# 如果需要使用Slack Claude,使用教程详情见 request_llm/README.md
|
||||
SLACK_CLAUDE_BOT_ID = ''
|
||||
@ -118,7 +109,14 @@ SLACK_CLAUDE_USER_TOKEN = ''
|
||||
|
||||
|
||||
# 如果需要使用AZURE 详情请见额外文档 docs\use_azure.md
|
||||
AZURE_ENDPOINT = "https://你的api名称.openai.azure.com/"
|
||||
AZURE_ENDPOINT = "https://你亲手写的api名称.openai.azure.com/"
|
||||
AZURE_API_KEY = "填入azure openai api的密钥"
|
||||
AZURE_API_VERSION = "填入api版本"
|
||||
AZURE_ENGINE = "填入ENGINE"
|
||||
AZURE_API_VERSION = "2023-05-15" # 一般不修改
|
||||
AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md
|
||||
|
||||
|
||||
# 使用Newbing
|
||||
NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
|
||||
NEWBING_COOKIES = """
|
||||
put your new bing cookies here
|
||||
"""
|
||||
|
||||
@ -61,7 +61,7 @@ def get_core_functions():
|
||||
},
|
||||
"找图片": {
|
||||
"Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +
|
||||
r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n",
|
||||
r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n",
|
||||
"Suffix": r"",
|
||||
"Visible": False,
|
||||
},
|
||||
@ -76,11 +76,3 @@ def get_core_functions():
|
||||
"Suffix": r"",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def get_guidance():
|
||||
pass
|
||||
|
||||
|
||||
def get_guidance():
|
||||
pass
|
||||
@ -20,28 +20,19 @@ def get_crazy_functions():
|
||||
from crazy_functions.解析项目源代码 import 解析一个Lua项目
|
||||
from crazy_functions.解析项目源代码 import 解析一个CSharp项目
|
||||
from crazy_functions.总结word文档 import 总结word文档
|
||||
from crazy_functions.辅助回答 import 猜你想问
|
||||
from crazy_functions.解析JupyterNotebook import 解析ipynb文件
|
||||
from crazy_functions.对话历史存档 import 对话历史存档
|
||||
from crazy_functions.对话历史存档 import 载入对话历史存档
|
||||
from crazy_functions.对话历史存档 import 删除所有本地对话历史记录
|
||||
|
||||
from crazy_functions.批量Markdown翻译 import Markdown英译中
|
||||
function_plugins = {
|
||||
"猜你想问": {
|
||||
"Function": HotReload(猜你想问)
|
||||
},
|
||||
"解析整个Python项目": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"AsButton": False,
|
||||
"Color": "stop", # 按钮颜色
|
||||
"Function": HotReload(解析一个Python项目)
|
||||
},
|
||||
|
||||
"保存当前的对话": {
|
||||
"AsButton": True,
|
||||
"Function": HotReload(对话历史存档)
|
||||
},
|
||||
"载入对话历史存档(先上传存档或输入路径)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton":False,
|
||||
"Function": HotReload(载入对话历史存档)
|
||||
},
|
||||
@ -49,78 +40,77 @@ def get_crazy_functions():
|
||||
"AsButton":False,
|
||||
"Function": HotReload(删除所有本地对话历史记录)
|
||||
},
|
||||
|
||||
"[测试功能] 解析Jupyter Notebook文件": {
|
||||
"Color": "primary",
|
||||
"AsButton": False,
|
||||
"Color": "stop",
|
||||
"AsButton":False,
|
||||
"Function": HotReload(解析ipynb文件),
|
||||
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
|
||||
"ArgsReminder": "若输入0,则不解析notebook中的Markdown块", # 高级参数输入区的显示提示
|
||||
},
|
||||
"批量总结Word文档": {
|
||||
"AsButton": False,
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"Function": HotReload(总结word文档)
|
||||
},
|
||||
"解析整个C++项目头文件": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"Color": "stop", # 按钮颜色
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(解析一个C项目的头文件)
|
||||
},
|
||||
"解析整个C++项目(.cpp/.hpp/.c/.h)": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"Color": "stop", # 按钮颜色
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(解析一个C项目)
|
||||
},
|
||||
"解析整个Go项目": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"Color": "stop", # 按钮颜色
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(解析一个Golang项目)
|
||||
},
|
||||
"解析整个Rust项目": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"Color": "stop", # 按钮颜色
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(解析一个Rust项目)
|
||||
},
|
||||
"解析整个Java项目": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"Color": "stop", # 按钮颜色
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(解析一个Java项目)
|
||||
},
|
||||
"解析整个前端项目(js,ts,css等)": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"Color": "stop", # 按钮颜色
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(解析一个前端项目)
|
||||
},
|
||||
"解析整个Lua项目": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"Color": "stop", # 按钮颜色
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(解析一个Lua项目)
|
||||
},
|
||||
"解析整个CSharp项目": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"Color": "stop", # 按钮颜色
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(解析一个CSharp项目)
|
||||
},
|
||||
"读Tex论文写摘要": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Color": "stop", # 按钮颜色
|
||||
"Function": HotReload(读文章写摘要)
|
||||
},
|
||||
"Markdown/Readme英译中": {
|
||||
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
"Color": "primary",
|
||||
"AsButton": False,
|
||||
"Color": "stop",
|
||||
"Function": HotReload(Markdown英译中)
|
||||
},
|
||||
"批量生成函数注释": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"Color": "stop", # 按钮颜色
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(批量生成函数注释)
|
||||
},
|
||||
"保存当前的对话": {
|
||||
"Function": HotReload(对话历史存档)
|
||||
},
|
||||
"[多线程Demo] 解析此项目本身(源码自译解)": {
|
||||
"Function": HotReload(解析项目本身),
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(解析项目本身)
|
||||
},
|
||||
# "[老旧的Demo] 把本项目源代码切换成全英文": {
|
||||
# # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
@ -129,8 +119,7 @@ def get_crazy_functions():
|
||||
# },
|
||||
"[插件demo] 历史上的今天": {
|
||||
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
"Function": HotReload(高阶功能模板函数),
|
||||
"AsButton": False,
|
||||
"Function": HotReload(高阶功能模板函数)
|
||||
},
|
||||
|
||||
}
|
||||
@ -149,69 +138,69 @@ def get_crazy_functions():
|
||||
|
||||
function_plugins.update({
|
||||
"批量翻译PDF文档(多线程)": {
|
||||
"Color": "primary",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Color": "stop",
|
||||
"AsButton": True, # 加入下拉菜单中
|
||||
"Function": HotReload(批量翻译PDF文档)
|
||||
},
|
||||
"询问多个GPT模型": {
|
||||
"Color": "primary", # 按钮颜色
|
||||
"Color": "stop", # 按钮颜色
|
||||
"Function": HotReload(同时问询)
|
||||
},
|
||||
"[测试功能] 批量总结PDF文档": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
"Function": HotReload(批量总结PDF文档)
|
||||
},
|
||||
# "[测试功能] 批量总结PDF文档pdfminer": {
|
||||
# "Color": "primary",
|
||||
# "Color": "stop",
|
||||
# "AsButton": False, # 加入下拉菜单中
|
||||
# "Function": HotReload(批量总结PDF文档pdfminer)
|
||||
# },
|
||||
"谷歌学术检索助手(输入谷歌学术搜索页url)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(谷歌检索小助手)
|
||||
},
|
||||
"理解PDF文档内容 (模仿ChatPDF)": {
|
||||
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
"Color": "primary",
|
||||
"AsButton": True, # 加入下拉菜单中
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(理解PDF文档内容标准文件输入)
|
||||
},
|
||||
"英文Latex项目全文润色(输入路径或上传压缩包)": {
|
||||
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(Latex英文润色)
|
||||
},
|
||||
"英文Latex项目全文纠错(输入路径或上传压缩包)": {
|
||||
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(Latex英文纠错)
|
||||
},
|
||||
"中文Latex项目全文润色(输入路径或上传压缩包)": {
|
||||
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(Latex中文润色)
|
||||
},
|
||||
"Latex项目全文中译英(输入路径或上传压缩包)": {
|
||||
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(Latex中译英)
|
||||
},
|
||||
"Latex项目全文英译中(输入路径或上传压缩包)": {
|
||||
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(Latex英译中)
|
||||
},
|
||||
"批量Markdown中译英(输入路径或上传压缩包)": {
|
||||
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(Markdown中译英)
|
||||
},
|
||||
@ -221,11 +210,12 @@ def get_crazy_functions():
|
||||
|
||||
###################### 第三组插件 ###########################
|
||||
# [第三组插件]: 尚未充分测试的函数插件
|
||||
|
||||
try:
|
||||
from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
|
||||
function_plugins.update({
|
||||
"一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(下载arxiv论文并翻译摘要)
|
||||
}
|
||||
@ -237,7 +227,7 @@ def get_crazy_functions():
|
||||
from crazy_functions.联网的ChatGPT import 连接网络回答问题
|
||||
function_plugins.update({
|
||||
"连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(连接网络回答问题)
|
||||
}
|
||||
@ -245,7 +235,7 @@ def get_crazy_functions():
|
||||
from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
|
||||
function_plugins.update({
|
||||
"连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False, # 加入下拉菜单中
|
||||
"Function": HotReload(连接bing搜索回答问题)
|
||||
}
|
||||
@ -257,7 +247,7 @@ def get_crazy_functions():
|
||||
from crazy_functions.解析项目源代码 import 解析任意code项目
|
||||
function_plugins.update({
|
||||
"解析项目源代码(手动指定和筛选源代码文件类型)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
|
||||
"ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
|
||||
@ -271,7 +261,7 @@ def get_crazy_functions():
|
||||
from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
|
||||
function_plugins.update({
|
||||
"询问多个GPT模型(手动指定询问哪些模型)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
|
||||
"ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
|
||||
@ -285,7 +275,7 @@ def get_crazy_functions():
|
||||
from crazy_functions.图片生成 import 图片生成
|
||||
function_plugins.update({
|
||||
"图片生成(先切换模型到openai或api2d)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
|
||||
"ArgsReminder": "在这里输入分辨率, 如256x256(默认)", # 高级参数输入区的显示提示
|
||||
@ -299,7 +289,7 @@ def get_crazy_functions():
|
||||
from crazy_functions.总结音视频 import 总结音视频
|
||||
function_plugins.update({
|
||||
"批量总结音视频(输入路径或上传压缩包)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True,
|
||||
"ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
|
||||
@ -309,51 +299,11 @@ def get_crazy_functions():
|
||||
except:
|
||||
print('Load function plugin failed')
|
||||
|
||||
from crazy_functions.解析项目源代码 import 解析任意code项目
|
||||
function_plugins.update({
|
||||
"解析项目源代码(手动指定和筛选源代码文件类型)": {
|
||||
"Color": "primary",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
|
||||
"ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
|
||||
"Function": HotReload(解析任意code项目)
|
||||
},
|
||||
})
|
||||
from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
|
||||
function_plugins.update({
|
||||
"询问多个GPT模型(手动指定询问哪些模型)": {
|
||||
"Color": "primary",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
|
||||
"ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
|
||||
"Function": HotReload(同时问询_指定模型)
|
||||
},
|
||||
})
|
||||
from crazy_functions.图片生成 import 图片生成
|
||||
function_plugins.update({
|
||||
"图片生成(先切换模型到openai或api2d)": {
|
||||
"Color": "primary",
|
||||
"AsButton": True,
|
||||
"AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
|
||||
"ArgsReminder": "在这里输入分辨率, 如'256x256'(默认), '512x512', '1024x1024'", # 高级参数输入区的显示提示
|
||||
"Function": HotReload(图片生成)
|
||||
},
|
||||
})
|
||||
from crazy_functions.总结音视频 import 总结音视频
|
||||
function_plugins.update({
|
||||
"批量总结音视频(输入路径或上传压缩包)": {
|
||||
"Color": "primary",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True,
|
||||
"ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
|
||||
"Function": HotReload(总结音视频)
|
||||
}
|
||||
})
|
||||
try:
|
||||
from crazy_functions.数学动画生成manim import 动画生成
|
||||
function_plugins.update({
|
||||
"数学动画生成(Manim)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"Function": HotReload(动画生成)
|
||||
}
|
||||
@ -365,7 +315,7 @@ def get_crazy_functions():
|
||||
from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
|
||||
function_plugins.update({
|
||||
"Markdown翻译(手动指定语言)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True,
|
||||
"ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。",
|
||||
@ -379,7 +329,7 @@ def get_crazy_functions():
|
||||
from crazy_functions.Langchain知识库 import 知识库问答
|
||||
function_plugins.update({
|
||||
"[功能尚不稳定] 构建知识库(请先上传文件素材)": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True,
|
||||
"ArgsReminder": "待注入的知识库名称id, 默认为default",
|
||||
@ -393,7 +343,7 @@ def get_crazy_functions():
|
||||
from crazy_functions.Langchain知识库 import 读取知识库作答
|
||||
function_plugins.update({
|
||||
"[功能尚不稳定] 知识库问答": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True,
|
||||
"ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要首先调用构建知识库",
|
||||
@ -407,7 +357,7 @@ def get_crazy_functions():
|
||||
from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
|
||||
function_plugins.update({
|
||||
"Latex英文纠错+高亮修正位置 [需Latex]": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True,
|
||||
"ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
|
||||
@ -417,22 +367,22 @@ def get_crazy_functions():
|
||||
from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
|
||||
function_plugins.update({
|
||||
"Arixv翻译(输入arxivID)[需Latex]": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True,
|
||||
"ArgsReminder":
|
||||
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
|
||||
"ArgsReminder":
|
||||
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
|
||||
"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
|
||||
"Function": HotReload(Latex翻译中文并重新编译PDF)
|
||||
}
|
||||
})
|
||||
function_plugins.update({
|
||||
"本地论文翻译(上传Latex压缩包)[需Latex]": {
|
||||
"Color": "primary",
|
||||
"Color": "stop",
|
||||
"AsButton": False,
|
||||
"AdvancedArgs": True,
|
||||
"ArgsReminder":
|
||||
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
|
||||
"ArgsReminder":
|
||||
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
|
||||
"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
|
||||
"Function": HotReload(Latex翻译中文并重新编译PDF)
|
||||
}
|
||||
@ -444,7 +394,7 @@ def get_crazy_functions():
|
||||
# from crazy_functions.虚空终端 import 终端
|
||||
# function_plugins.update({
|
||||
# "超级终端": {
|
||||
# "Color": "primary",
|
||||
# "Color": "stop",
|
||||
# "AsButton": False,
|
||||
# # "AdvancedArgs": True,
|
||||
# # "ArgsReminder": "",
|
||||
@ -454,5 +404,4 @@ def get_crazy_functions():
|
||||
# except:
|
||||
# print('Load function plugin failed')
|
||||
|
||||
###################### 第n组插件 ###########################
|
||||
return function_plugins
|
||||
|
||||
@ -130,6 +130,11 @@ def request_gpt_model_in_new_thread_with_ui_alive(
|
||||
yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
|
||||
return final_result
|
||||
|
||||
def can_multi_process(llm):
|
||||
if llm.startswith('gpt-'): return True
|
||||
if llm.startswith('api2d-'): return True
|
||||
if llm.startswith('azure-'): return True
|
||||
return False
|
||||
|
||||
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
||||
inputs_array, inputs_show_user_array, llm_kwargs,
|
||||
@ -175,16 +180,16 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
||||
except: max_workers = 8
|
||||
if max_workers <= 0: max_workers = 3
|
||||
# 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
|
||||
if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-') or llm_kwargs['llm_model'].startswith('proxy-gpt')):
|
||||
if not can_multi_process(llm_kwargs['llm_model']):
|
||||
max_workers = 1
|
||||
|
||||
executor = ThreadPoolExecutor(max_workers=max_workers)
|
||||
n_frag = len(inputs_array)
|
||||
# 用户反馈
|
||||
chatbot.append([None, ""])
|
||||
chatbot.append(["请开始多线程操作。", ""])
|
||||
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
|
||||
# 跨线程传递
|
||||
mutable = [[f"", time.time(), "等待中"] for _ in range(n_frag)]
|
||||
mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
|
||||
|
||||
# 子线程任务
|
||||
def _req_gpt(index, inputs, history, sys_prompt):
|
||||
@ -272,8 +277,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
||||
' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
|
||||
observe_win.append(print_something_really_funny)
|
||||
# 在前端打印些好玩的东西
|
||||
stat_str = ''.join([f'`{inputs_show_user_array[thread_index][0:5]}...{inputs_show_user_array[thread_index][-5:]}`\t'
|
||||
f'`{mutable[thread_index][2]}`: {obs}\n\n'
|
||||
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
|
||||
if not done else f'`{mutable[thread_index][2]}`\n\n'
|
||||
for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
|
||||
# 在前端打印些好玩的东西
|
||||
|
||||
@ -657,7 +657,6 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
|
||||
|
||||
write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder)
|
||||
|
||||
|
||||
# <-------- 写出文件 ---------->
|
||||
msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}。"
|
||||
final_tex = lps.merge_result(pfg.file_result, mode, msg)
|
||||
@ -744,7 +743,6 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
|
||||
ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
|
||||
|
||||
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面
|
||||
|
||||
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
|
||||
ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder)
|
||||
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
|
||||
@ -769,7 +767,6 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
|
||||
result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path
|
||||
if os.path.exists(pj(work_folder, '..', 'translation')):
|
||||
shutil.copyfile(result_pdf, pj(work_folder, '..', 'translation', 'translate_zh.pdf'))
|
||||
|
||||
promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
|
||||
return True # 成功啦
|
||||
else:
|
||||
|
||||
@ -27,22 +27,20 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"):
|
||||
}
|
||||
response = requests.post(url, headers=headers, json=data, proxies=proxies)
|
||||
print(response.content)
|
||||
|
||||
try:
|
||||
image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
|
||||
except:
|
||||
raise RuntimeError(response.content.decode())
|
||||
|
||||
# 文件保存到本地
|
||||
r = requests.get(image_url, proxies=proxies)
|
||||
file_path = 'gpt_log/image_gen/'
|
||||
os.makedirs(file_path, exist_ok=True)
|
||||
file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png'
|
||||
with open(file_path + file_name, 'wb+') as f:
|
||||
f.write(r.content)
|
||||
return image_url, file_path + file_name
|
||||
with open(file_path+file_name, 'wb+') as f: f.write(r.content)
|
||||
|
||||
|
||||
return image_url, file_path+file_name
|
||||
|
||||
|
||||
|
||||
@CatchException
|
||||
|
||||
@ -71,7 +71,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
|
||||
|
||||
prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
|
||||
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
|
||||
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
|
||||
i_say_show_user = prefix + f'[{index + 1}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
|
||||
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
|
||||
|
||||
@ -53,10 +53,9 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
|
||||
)
|
||||
iteration_results.append(gpt_say)
|
||||
last_iteration_result = gpt_say
|
||||
|
||||
############################## <第 3 步,整理history> ##################################
|
||||
final_results.extend(iteration_results)
|
||||
# 将摘要添加到历史中,方便"猜你想问"使用
|
||||
history.extend([last_iteration_result])
|
||||
final_results.append(f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。')
|
||||
# 接下来两句话只显示在界面上,不起实际作用
|
||||
i_say_show_user = f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。'; gpt_say = "[Local Message] 收到。"
|
||||
@ -113,4 +112,3 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat
|
||||
txt = file_manifest[0]
|
||||
# 开始正式执行任务
|
||||
yield from 解析PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
||||
|
||||
|
||||
@ -144,13 +144,3 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
return
|
||||
yield from ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, )
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
import json
|
||||
filename = ''
|
||||
code = parseNotebook(filename)
|
||||
print(code)
|
||||
with open(filename, 'r', encoding='utf-8', errors='replace') as f:
|
||||
notebook = f.read()
|
||||
print(notebook)
|
||||
@ -13,13 +13,8 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
|
||||
show_say = txt
|
||||
prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。'
|
||||
else:
|
||||
|
||||
prompt = history[-1]+"\n分析上述回答,再列出用户可能提出的三个问题。"
|
||||
show_say = '分析上述回答,再列出用户可能提出的三个问题。'
|
||||
try:
|
||||
prompt = history[-1]+f"\n{show_say}"
|
||||
except IndexError:
|
||||
prompt = system_prompt+"\n再列出用户可能提出的三个问题。"
|
||||
|
||||
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
||||
inputs=prompt,
|
||||
inputs_show_user=show_say,
|
||||
@ -28,8 +23,6 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
|
||||
history=history,
|
||||
sys_prompt=system_prompt
|
||||
)
|
||||
|
||||
chatbot[-1] = (show_say, gpt_say)
|
||||
history.extend([show_say, gpt_say])
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
@ -1,13 +1,12 @@
|
||||
from toolbox import CatchException, update_ui
|
||||
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
||||
import datetime, re
|
||||
|
||||
import datetime
|
||||
@CatchException
|
||||
def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
||||
"""
|
||||
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
||||
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
||||
plugin_kwargs 插件模型的参数,暂时没有用武之地
|
||||
plugin_kwargs 插件模型的参数,如温度和top_p等,一般原样传递下去就行
|
||||
chatbot 聊天显示框的句柄,用于显示给用户
|
||||
history 聊天历史,前情提要
|
||||
system_prompt 给gpt的静默提醒
|
||||
@ -19,34 +18,12 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
|
||||
for i in range(5):
|
||||
currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
|
||||
currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day
|
||||
i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?用中文列举两条,然后分别给出描述事件的两个英文单词。' + '当你给出关键词时,使用以下json格式:{"KeyWords":[EnglishKeyWord1,EnglishKeyWord2]}。'
|
||||
i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。'
|
||||
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
||||
inputs=i_say, inputs_show_user=i_say,
|
||||
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
|
||||
sys_prompt='输出格式示例:1908年,美国消防救援事业发展的“美国消防协会”成立。关键词:{"KeyWords":["Fire","American"]}。'
|
||||
sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? < PUT_YOUR_QUERY_HERE >)。"
|
||||
)
|
||||
gpt_say = get_images(gpt_say)
|
||||
chatbot[-1] = (i_say, gpt_say)
|
||||
history.append(i_say);history.append(gpt_say)
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
|
||||
|
||||
|
||||
def get_images(gpt_say):
|
||||
def get_image_by_keyword(keyword):
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
response = requests.get(f'https://wallhaven.cc/search?q={keyword}', timeout=2)
|
||||
for image_element in BeautifulSoup(response.content, 'html.parser').findAll("img"):
|
||||
if "data-src" in image_element: break
|
||||
return image_element["data-src"]
|
||||
|
||||
for keywords in re.findall('{"KeyWords":\[(.*?)\]}', gpt_say):
|
||||
keywords = [n.strip('"') for n in keywords.split(',')]
|
||||
try:
|
||||
description = keywords[0]
|
||||
url = get_image_by_keyword(keywords[0])
|
||||
img_tag = f"\n\n"
|
||||
gpt_say += img_tag
|
||||
except:
|
||||
continue
|
||||
return gpt_say
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 57 KiB |
@ -1,806 +0,0 @@
|
||||
:root {
|
||||
--chatbot-color-light: #000000;
|
||||
--chatbot-color-dark: #FFFFFF;
|
||||
--chatbot-background-color-light: #F3F3F3;
|
||||
--chatbot-background-color-dark: #121111;
|
||||
--message-user-background-color-light: #95EC69;
|
||||
--message-user-background-color-dark: #26B561;
|
||||
--message-bot-background-color-light: #FFFFFF;
|
||||
--message-bot-background-color-dark: #2C2C2C;
|
||||
}
|
||||
mspace {
|
||||
display: block;
|
||||
}
|
||||
@media only screen and (max-width: 767px) {
|
||||
#column_1 {
|
||||
display: none !important;
|
||||
}
|
||||
}
|
||||
@keyframes highlight {
|
||||
0%, 100% {
|
||||
border: 2px solid transparent;
|
||||
}
|
||||
50% {
|
||||
border-color: yellow;
|
||||
}
|
||||
}
|
||||
|
||||
#highlight_update {
|
||||
animation-name: highlight;
|
||||
animation-duration: 0.75s;
|
||||
animation-iteration-count: 3;
|
||||
}
|
||||
|
||||
.table-wrap.svelte-13hsdno.svelte-13hsdno.svelte-13hsdno {
|
||||
border: 0px solid var(--border-color-primary) !important;
|
||||
}
|
||||
|
||||
#examples_col {
|
||||
z-index: 2;
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
width: 100%;
|
||||
margin-bottom: 30% !important;
|
||||
}
|
||||
#hide_examples {
|
||||
z-index: 0;
|
||||
}
|
||||
|
||||
#debug_mes {
|
||||
position: absolute;
|
||||
display: flex;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
z-index: 1; /* 设置更高的 z-index 值 */
|
||||
margin-bottom: -4px !important;
|
||||
align-self: flex-end;
|
||||
}
|
||||
#chat_box {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
overflow-y: visible !important;
|
||||
z-index: 3;
|
||||
flex-grow: 1; /* 自动填充剩余空间 */
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
width: 100%;
|
||||
margin-bottom: 30px !important;
|
||||
border: 1px solid var(--border-color-primary);
|
||||
}
|
||||
.toast-body {
|
||||
z-index: 5 !important;
|
||||
}
|
||||
.chat_input {
|
||||
|
||||
}
|
||||
.sm_btn {
|
||||
position: relative;
|
||||
bottom: 5px;
|
||||
height: 10%;
|
||||
border-radius: 20px!important;
|
||||
min-width: min(10%,100%) !important;
|
||||
overflow: hidden;
|
||||
}
|
||||
.sm_select {
|
||||
position: relative !important;
|
||||
z-index: 5 !important;
|
||||
bottom: 5px;
|
||||
min-width: min(20%,100%) !important;
|
||||
border-radius: 20px!important;
|
||||
}
|
||||
.sm_checkbox {
|
||||
position: relative !important;
|
||||
z-index: 5 !important;
|
||||
bottom: 5px;
|
||||
padding: 0 !important;
|
||||
}
|
||||
.sm_select .wrap-inner.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e {
|
||||
padding: 0 !important;
|
||||
}
|
||||
.sm_select .block.svelte-mppz8v {
|
||||
width: 10% !important;
|
||||
}
|
||||
|
||||
/* usage_display */
|
||||
.insert_block {
|
||||
position: relative;
|
||||
bottom: 2px;
|
||||
min-width: min(55px,100%) !important;
|
||||
}
|
||||
|
||||
.submit_btn {
|
||||
flex-direction: column-reverse;
|
||||
overflow-y: auto !important;
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
right: 10px;
|
||||
margin-bottom: 10px !important;
|
||||
min-width: min(50px,100%) !important;
|
||||
}
|
||||
|
||||
textarea {
|
||||
resize: none;
|
||||
height: 100%; /* 填充父元素的高度 */
|
||||
}
|
||||
#main_chatbot {
|
||||
height: 75vh !important;
|
||||
max-height: 75vh !important;
|
||||
/* overflow: auto !important; */
|
||||
z-index: 2;
|
||||
transform: translateZ(0) !important;
|
||||
backface-visibility: hidden !important;
|
||||
will-change: transform !important;
|
||||
}
|
||||
#prompt_result{
|
||||
height: 60vh !important;
|
||||
max-height: 60vh !important;
|
||||
}
|
||||
|
||||
#app_title {
|
||||
font-weight: var(--prose-header-text-weight);
|
||||
font-size: var(--text-xxl);
|
||||
line-height: 1.3;
|
||||
text-align: left;
|
||||
margin-top: 6px;
|
||||
white-space: nowrap;
|
||||
}
|
||||
#description {
|
||||
text-align: center;
|
||||
margin: 32px 0 4px 0;
|
||||
}
|
||||
|
||||
/* gradio的页脚信息 */
|
||||
footer {
|
||||
/* display: none !important; */
|
||||
margin-top: .2em !important;
|
||||
font-size: 85%;
|
||||
}
|
||||
#footer {
|
||||
text-align: center;
|
||||
}
|
||||
#footer div {
|
||||
display: inline-block;
|
||||
}
|
||||
#footer .versions{
|
||||
font-size: 85%;
|
||||
opacity: 0.60;
|
||||
}
|
||||
|
||||
#float_display {
|
||||
position: absolute;
|
||||
max-height: 30px;
|
||||
}
|
||||
/* user_info */
|
||||
#user_info {
|
||||
white-space: nowrap;
|
||||
position: absolute; left: 8em; top: .2em;
|
||||
z-index: var(--layer-2);
|
||||
box-shadow: var(--block-shadow);
|
||||
border: none; border-radius: var(--block-label-radius);
|
||||
background: var(--color-accent);
|
||||
padding: var(--block-label-padding);
|
||||
font-size: var(--block-label-text-size); line-height: var(--line-sm);
|
||||
width: auto; min-height: 30px !important;
|
||||
opacity: 1;
|
||||
transition: opacity 0.3s ease-in-out;
|
||||
}
|
||||
textarea.svelte-1pie7s6 {
|
||||
background: #e7e6e6 !important;
|
||||
width: 96% !important;
|
||||
}
|
||||
|
||||
.dark textarea.svelte-1pie7s6 {
|
||||
background: var(--input-background-fill) !important;
|
||||
width: 96% !important;
|
||||
}
|
||||
|
||||
.dark input[type=number].svelte-1cl284s {
|
||||
background: #393939 !important;
|
||||
border: var(--input-border-width) solid var(--input-border-color) !important;
|
||||
}
|
||||
.dark input[type="range"] {
|
||||
background: #393939 !important;
|
||||
}
|
||||
#user_info .wrap {
|
||||
opacity: 0;
|
||||
}
|
||||
#user_info p {
|
||||
color: white;
|
||||
font-weight: var(--block-label-text-weight);
|
||||
}
|
||||
#user_info.hideK {
|
||||
opacity: 0;
|
||||
transition: opacity 1s ease-in-out;
|
||||
}
|
||||
[class *= "message"] {
|
||||
gap: 7px !important;
|
||||
border-radius: var(--radius-xl) !important
|
||||
}
|
||||
/* debug_mes */
|
||||
#debug_mes {
|
||||
min-height: 2em;
|
||||
align-items: flex-end;
|
||||
justify-content: flex-end;
|
||||
}
|
||||
#debug_mes p {
|
||||
font-size: .85em;
|
||||
font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace;
|
||||
/* Windows下中文的monospace会fallback为新宋体,实在太丑,这里折中使用微软雅黑 */
|
||||
color: #000000;
|
||||
}
|
||||
.dark #debug_mes p {
|
||||
color: #ee65ed;
|
||||
}
|
||||
|
||||
#debug_mes {
|
||||
transition: all 0.6s;
|
||||
}
|
||||
#main_chatbot {
|
||||
transition: height 0.3s ease;
|
||||
}
|
||||
|
||||
.wrap.svelte-18telvq.svelte-18telvq {
|
||||
padding: var(--block-padding) !important;
|
||||
height: 100% !important;
|
||||
max-height: 95% !important;
|
||||
overflow-y: auto !important;
|
||||
}
|
||||
.app.svelte-1mya07g.svelte-1mya07g {
|
||||
max-width: 100%;
|
||||
position: relative;
|
||||
/* margin: auto; */
|
||||
padding: var(--size-4);
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
.gradio-container-3-32-2 h1 {
|
||||
font-weight: 700 !important;
|
||||
font-size: 28px !important;
|
||||
}
|
||||
|
||||
|
||||
.gradio-container-3-32-2 h2 {
|
||||
font-weight: 600 !important;
|
||||
font-size: 24px !important;
|
||||
}
|
||||
.gradio-container-3-32-2 h3 {
|
||||
font-weight: 500 !important;
|
||||
font-size: 20px !important;
|
||||
}
|
||||
.gradio-container-3-32-2 h4 {
|
||||
font-weight: 400 !important;
|
||||
font-size: 16px !important;
|
||||
}
|
||||
.gradio-container-3-32-2 h5 {
|
||||
font-weight: 300 !important;
|
||||
font-size: 14px !important;
|
||||
}
|
||||
.gradio-container-3-32-2 h6 {
|
||||
font-weight: 200 !important;
|
||||
font-size: 12px !important;
|
||||
}
|
||||
|
||||
|
||||
#usage_display p, #usage_display span {
|
||||
margin: 0;
|
||||
font-size: .85em;
|
||||
color: var(--body-text-color-subdued);
|
||||
}
|
||||
.progress-bar {
|
||||
background-color: var(--input-background-fill);;
|
||||
margin: .5em 0 !important;
|
||||
height: 20px;
|
||||
border-radius: 10px;
|
||||
overflow: hidden;
|
||||
}
|
||||
.progress {
|
||||
background-color: var(--block-title-background-fill);
|
||||
height: 100%;
|
||||
border-radius: 10px;
|
||||
text-align: right;
|
||||
transition: width 0.5s ease-in-out;
|
||||
}
|
||||
.progress-text {
|
||||
/* color: white; */
|
||||
color: var(--color-accent) !important;
|
||||
font-size: 1em !important;
|
||||
font-weight: bold;
|
||||
padding-right: 10px;
|
||||
line-height: 20px;
|
||||
}
|
||||
|
||||
.apSwitch {
|
||||
top: 2px;
|
||||
display: inline-block;
|
||||
height: 24px;
|
||||
position: relative;
|
||||
width: 48px;
|
||||
border-radius: 12px;
|
||||
}
|
||||
.apSwitch input {
|
||||
display: none !important;
|
||||
}
|
||||
.apSlider {
|
||||
background-color: var(--neutral-200);
|
||||
bottom: 0;
|
||||
cursor: pointer;
|
||||
left: 0;
|
||||
position: absolute;
|
||||
right: 0;
|
||||
top: 0;
|
||||
transition: .4s;
|
||||
font-size: 18px;
|
||||
border-radius: 7px;
|
||||
}
|
||||
.apSlider::before {
|
||||
bottom: -1.5px;
|
||||
left: 1px;
|
||||
position: absolute;
|
||||
transition: .4s;
|
||||
content: "🌞";
|
||||
}
|
||||
hr.append-display {
|
||||
margin: 8px 0;
|
||||
border: none;
|
||||
height: 1px;
|
||||
border-top-width: 0;
|
||||
background-image: linear-gradient(to right, rgba(50,50,50, 0.1), rgba(150, 150, 150, 0.8), rgba(50,50,50, 0.1));
|
||||
}
|
||||
.source-a {
|
||||
font-size: 0.8em;
|
||||
max-width: 100%;
|
||||
margin: 0;
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
flex-wrap: wrap;
|
||||
align-items: center;
|
||||
/* background-color: #dddddd88; */
|
||||
border-radius: 1.5rem;
|
||||
padding: 0.2em;
|
||||
}
|
||||
.source-a a {
|
||||
display: inline-block;
|
||||
background-color: #aaaaaa50;
|
||||
border-radius: 1rem;
|
||||
padding: 0.5em;
|
||||
text-align: center;
|
||||
text-overflow: ellipsis;
|
||||
overflow: hidden;
|
||||
min-width: 20%;
|
||||
white-space: nowrap;
|
||||
margin: 0.2rem 0.1rem;
|
||||
text-decoration: none !important;
|
||||
flex: 1;
|
||||
transition: flex 0.5s;
|
||||
}
|
||||
.source-a a:hover {
|
||||
background-color: #aaaaaa20;
|
||||
flex: 2;
|
||||
}
|
||||
input:checked + .apSlider {
|
||||
background-color: var(--primary-600);
|
||||
}
|
||||
input:checked + .apSlider::before {
|
||||
transform: translateX(23px);
|
||||
content:"🌚";
|
||||
}
|
||||
|
||||
/* Override Slider Styles (for webkit browsers like Safari and Chrome)
|
||||
* 好希望这份提案能早日实现 https://github.com/w3c/csswg-drafts/issues/4410
|
||||
* 进度滑块在各个平台还是太不统一了
|
||||
*/
|
||||
input[type="range"] {
|
||||
-webkit-appearance: none;
|
||||
height: 4px;
|
||||
background: var(--input-background-fill);
|
||||
border-radius: 5px;
|
||||
background-image: linear-gradient(var(--primary-500),var(--primary-500));
|
||||
background-size: 0% 100%;
|
||||
background-repeat: no-repeat;
|
||||
}
|
||||
input[type="range"]::-webkit-slider-thumb {
|
||||
-webkit-appearance: none;
|
||||
height: 20px;
|
||||
width: 20px;
|
||||
border-radius: 50%;
|
||||
border: solid 0.5px #ddd;
|
||||
background-color: white;
|
||||
cursor: ew-resize;
|
||||
box-shadow: var(--input-shadow);
|
||||
transition: background-color .1s ease;
|
||||
}
|
||||
input[type="range"]::-webkit-slider-thumb:hover {
|
||||
background: var(--neutral-50);
|
||||
}
|
||||
input[type=range]::-webkit-slider-runnable-track {
|
||||
-webkit-appearance: none;
|
||||
box-shadow: none;
|
||||
border: none;
|
||||
background: transparent;
|
||||
}
|
||||
|
||||
.submit_btn, #cancel_btn {
|
||||
height: 42px !important;
|
||||
}
|
||||
.submit_btn::before {
|
||||
content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E");
|
||||
height: 21px;
|
||||
}
|
||||
|
||||
#cancel_btn::before {
|
||||
content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 
15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E");
|
||||
height: 21px;
|
||||
}
|
||||
/* list */
|
||||
ol:not(.options), ul:not(.options) {
|
||||
padding-inline-start: 2em !important;
|
||||
}
|
||||
|
||||
/* 亮色(默认) */
|
||||
#main_chatbot {
|
||||
background-color: var(--chatbot-background-color-light) !important;
|
||||
color: var(--chatbot-color-light) !important;
|
||||
}
|
||||
/* 暗色 */
|
||||
.dark #main_chatbot {
|
||||
background-color: var(--block-background-fill) !important;
|
||||
color: var(--chatbot-color-dark) !important;
|
||||
}
|
||||
|
||||
/* 屏幕宽度大于等于500px的设备 */
|
||||
/* update on 2023.4.8: 高度的细致调整已写入JavaScript */
|
||||
@media screen and (min-width: 500px) {
|
||||
#main_chatbot {
|
||||
height: calc(100vh - 200px);
|
||||
}
|
||||
#main_chatbot .wrap {
|
||||
max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
|
||||
}
|
||||
}
|
||||
/* 屏幕宽度小于500px的设备 */
|
||||
@media screen and (max-width: 499px) {
|
||||
#main_chatbot {
|
||||
height: calc(100vh - 140px);
|
||||
}
|
||||
#main_chatbot .wrap {
|
||||
max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
|
||||
}
|
||||
[data-testid = "bot"] {
|
||||
max-width: 95% !important;
|
||||
}
|
||||
#app_title h1{
|
||||
letter-spacing: -1px; font-size: 22px;
|
||||
}
|
||||
}
|
||||
#main_chatbot .wrap {
|
||||
overflow-x: hidden
|
||||
}
|
||||
/* 对话气泡 */
|
||||
.message {
|
||||
border-radius: var(--radius-xl) !important;
|
||||
border: none;
|
||||
padding: var(--spacing-xl) !important;
|
||||
font-size: 15px !important;
|
||||
line-height: var(--line-md) !important;
|
||||
min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
|
||||
min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
|
||||
}
|
||||
[data-testid = "bot"] {
|
||||
max-width: 85%;
|
||||
border-bottom-left-radius: 0 !important;
|
||||
}
|
||||
[data-testid = "user"] {
|
||||
max-width: 85%;
|
||||
width: auto !important;
|
||||
border-bottom-right-radius: 0 !important;
|
||||
}
|
||||
|
||||
.message p {
|
||||
margin-top: 0.6em !important;
|
||||
margin-bottom: 0.6em !important;
|
||||
}
|
||||
.message p:first-child { margin-top: 0 !important; }
|
||||
.message p:last-of-type { margin-bottom: 0 !important; }
|
||||
|
||||
.message .md-message {
|
||||
display: block;
|
||||
padding: 0 !important;
|
||||
}
|
||||
.message .raw-message {
|
||||
display: block;
|
||||
padding: 0 !important;
|
||||
white-space: pre-wrap;
|
||||
}
|
||||
.raw-message.hideM, .md-message.hideM {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* custom buttons */
|
||||
.chuanhu-btn {
|
||||
border-radius: 5px;
|
||||
/* background-color: #E6E6E6 !important; */
|
||||
color: rgba(120, 120, 120, 0.64) !important;
|
||||
padding: 4px !important;
|
||||
position: absolute;
|
||||
right: -22px;
|
||||
cursor: pointer !important;
|
||||
transition: color .2s ease, background-color .2s ease;
|
||||
}
|
||||
.chuanhu-btn:hover {
|
||||
background-color: rgba(167, 167, 167, 0.25) !important;
|
||||
color: unset !important;
|
||||
}
|
||||
.chuanhu-btn:active {
|
||||
background-color: rgba(167, 167, 167, 0.5) !important;
|
||||
}
|
||||
.chuanhu-btn:focus {
|
||||
outline: none;
|
||||
}
|
||||
.copy-bot-btn {
|
||||
/* top: 18px; */
|
||||
bottom: 0;
|
||||
}
|
||||
.toggle-md-btn {
|
||||
/* top: 0; */
|
||||
bottom: 20px;
|
||||
}
|
||||
.copy-code-btn {
|
||||
position: relative;
|
||||
float: right;
|
||||
font-size: 1em;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.message-wrap>div img{
|
||||
border-radius: 10px !important;
|
||||
}
|
||||
|
||||
/* history message */
|
||||
.wrap>.history-message {
|
||||
padding: 10px !important;
|
||||
}
|
||||
.history-message {
|
||||
/* padding: 0 !important; */
|
||||
opacity: 80%;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
.history-message>.history-message {
|
||||
padding: 0 !important;
|
||||
}
|
||||
.history-message>.message-wrap {
|
||||
padding: 0 !important;
|
||||
margin-bottom: 16px;
|
||||
}
|
||||
.history-message>.message {
|
||||
margin-bottom: 16px;
|
||||
}
|
||||
.wrap>.history-message::after {
|
||||
content: "";
|
||||
display: block;
|
||||
height: 2px;
|
||||
background-color: var(--body-text-color-subdued);
|
||||
margin-bottom: 10px;
|
||||
margin-top: -10px;
|
||||
clear: both;
|
||||
}
|
||||
.wrap>.history-message>:last-child::after {
|
||||
content: "仅供查看";
|
||||
display: block;
|
||||
text-align: center;
|
||||
color: var(--body-text-color-subdued);
|
||||
font-size: 0.8em;
|
||||
}
|
||||
|
||||
/* 表格 */
|
||||
table {
|
||||
margin: 1em 0;
|
||||
border-collapse: collapse;
|
||||
empty-cells: show;
|
||||
}
|
||||
td,th {
|
||||
border: 1.2px solid var(--border-color-primary) !important;
|
||||
padding: 0.2em;
|
||||
}
|
||||
thead {
|
||||
background-color: rgba(175,184,193,0.2);
|
||||
}
|
||||
thead th {
|
||||
padding: .5em .2em;
|
||||
}
|
||||
/* 行内代码 */
|
||||
.message :not(pre) code {
|
||||
display: inline;
|
||||
white-space: break-spaces;
|
||||
border-radius: 6px;
|
||||
margin: 0 2px 0 2px;
|
||||
padding: .2em .4em .1em .4em;
|
||||
background-color: rgba(175,184,193,0.2);
|
||||
}
|
||||
/* 代码块 */
|
||||
.message pre code {
|
||||
display: block;
|
||||
overflow: auto;
|
||||
white-space: pre;
|
||||
background-color: hsla(0, 0%, 7%, 70%)!important;
|
||||
border-radius: 10px;
|
||||
padding: 1.2em 1em 0em .5em;
|
||||
margin: 0.6em 2em 1em 0.2em;
|
||||
color: #FFF;
|
||||
box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
|
||||
}
|
||||
.dark .message pre code {
|
||||
background-color: hsla(0, 0%, 20%, 300%)!important;
|
||||
}
|
||||
.message pre {
|
||||
padding: 0 !important;
|
||||
}
|
||||
.message pre code div.highlight {
|
||||
background-color: unset !important;
|
||||
}
|
||||
|
||||
button.copy-button {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* 代码高亮样式 */
|
||||
.codehilite .hll { background-color: #6e7681 }
|
||||
.codehilite .c { color: #8b949e; font-style: italic } /* Comment */
|
||||
.codehilite .err { color: #f85149 } /* Error */
|
||||
.codehilite .esc { color: #c9d1d9 } /* Escape */
|
||||
.codehilite .g { color: #c9d1d9 } /* Generic */
|
||||
.codehilite .k { color: #ff7b72 } /* Keyword */
|
||||
.codehilite .l { color: #a5d6ff } /* Literal */
|
||||
.codehilite .n { color: #c9d1d9 } /* Name */
|
||||
.codehilite .o { color: #ff7b72; font-weight: bold } /* Operator */
|
||||
.codehilite .x { color: #c9d1d9 } /* Other */
|
||||
.codehilite .p { color: #c9d1d9 } /* Punctuation */
|
||||
.codehilite .ch { color: #8b949e; font-style: italic } /* Comment.Hashbang */
|
||||
.codehilite .cm { color: #8b949e; font-style: italic } /* Comment.Multiline */
|
||||
.codehilite .cp { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Preproc */
|
||||
.codehilite .cpf { color: #8b949e; font-style: italic } /* Comment.PreprocFile */
|
||||
.codehilite .c1 { color: #8b949e; font-style: italic } /* Comment.Single */
|
||||
.codehilite .cs { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Special */
|
||||
.codehilite .gd { color: #ffa198; background-color: #490202 } /* Generic.Deleted */
|
||||
.codehilite .ge { color: #c9d1d9; font-style: italic } /* Generic.Emph */
|
||||
.codehilite .gr { color: #ffa198 } /* Generic.Error */
|
||||
.codehilite .gh { color: #79c0ff; font-weight: bold } /* Generic.Heading */
|
||||
.codehilite .gi { color: #56d364; background-color: #0f5323 } /* Generic.Inserted */
|
||||
.codehilite .go { color: #8b949e } /* Generic.Output */
|
||||
.codehilite .gp { color: #8b949e } /* Generic.Prompt */
|
||||
.codehilite .gs { color: #c9d1d9; font-weight: bold } /* Generic.Strong */
|
||||
.codehilite .gu { color: #79c0ff } /* Generic.Subheading */
|
||||
.codehilite .gt { color: #ff7b72 } /* Generic.Traceback */
|
||||
.codehilite .g-Underline { color: #c9d1d9; text-decoration: underline } /* Generic.Underline */
|
||||
.codehilite .kc { color: #79c0ff } /* Keyword.Constant */
|
||||
.codehilite .kd { color: #ff7b72 } /* Keyword.Declaration */
|
||||
.codehilite .kn { color: #ff7b72 } /* Keyword.Namespace */
|
||||
.codehilite .kp { color: #79c0ff } /* Keyword.Pseudo */
|
||||
.codehilite .kr { color: #ff7b72 } /* Keyword.Reserved */
|
||||
.codehilite .kt { color: #ff7b72 } /* Keyword.Type */
|
||||
.codehilite .ld { color: #79c0ff } /* Literal.Date */
|
||||
.codehilite .m { color: #a5d6ff } /* Literal.Number */
|
||||
.codehilite .s { color: #a5d6ff } /* Literal.String */
|
||||
.codehilite .na { color: #c9d1d9 } /* Name.Attribute */
|
||||
.codehilite .nb { color: #c9d1d9 } /* Name.Builtin */
|
||||
.codehilite .nc { color: #f0883e; font-weight: bold } /* Name.Class */
|
||||
.codehilite .no { color: #79c0ff; font-weight: bold } /* Name.Constant */
|
||||
.codehilite .nd { color: #d2a8ff; font-weight: bold } /* Name.Decorator */
|
||||
.codehilite .ni { color: #ffa657 } /* Name.Entity */
|
||||
.codehilite .ne { color: #f0883e; font-weight: bold } /* Name.Exception */
|
||||
.codehilite .nf { color: #d2a8ff; font-weight: bold } /* Name.Function */
|
||||
.codehilite .nl { color: #79c0ff; font-weight: bold } /* Name.Label */
|
||||
.codehilite .nn { color: #ff7b72 } /* Name.Namespace */
|
||||
.codehilite .nx { color: #c9d1d9 } /* Name.Other */
|
||||
.codehilite .py { color: #79c0ff } /* Name.Property */
|
||||
.codehilite .nt { color: #7ee787 } /* Name.Tag */
|
||||
.codehilite .nv { color: #79c0ff } /* Name.Variable */
|
||||
.codehilite .ow { color: #ff7b72; font-weight: bold } /* Operator.Word */
|
||||
.codehilite .pm { color: #c9d1d9 } /* Punctuation.Marker */
|
||||
.codehilite .w { color: #6e7681 } /* Text.Whitespace */
|
||||
.codehilite .mb { color: #a5d6ff } /* Literal.Number.Bin */
|
||||
.codehilite .mf { color: #a5d6ff } /* Literal.Number.Float */
|
||||
.codehilite .mh { color: #a5d6ff } /* Literal.Number.Hex */
|
||||
.codehilite .mi { color: #a5d6ff } /* Literal.Number.Integer */
|
||||
.codehilite .mo { color: #a5d6ff } /* Literal.Number.Oct */
|
||||
.codehilite .sa { color: #79c0ff } /* Literal.String.Affix */
|
||||
.codehilite .sb { color: #a5d6ff } /* Literal.String.Backtick */
|
||||
.codehilite .sc { color: #a5d6ff } /* Literal.String.Char */
|
||||
.codehilite .dl { color: #79c0ff } /* Literal.String.Delimiter */
|
||||
.codehilite .sd { color: #a5d6ff } /* Literal.String.Doc */
|
||||
.codehilite .s2 { color: #a5d6ff } /* Literal.String.Double */
|
||||
.codehilite .se { color: #79c0ff } /* Literal.String.Escape */
|
||||
.codehilite .sh { color: #79c0ff } /* Literal.String.Heredoc */
|
||||
.codehilite .si { color: #a5d6ff } /* Literal.String.Interpol */
|
||||
.codehilite .sx { color: #a5d6ff } /* Literal.String.Other */
|
||||
.codehilite .sr { color: #79c0ff } /* Literal.String.Regex */
|
||||
.codehilite .s1 { color: #a5d6ff } /* Literal.String.Single */
|
||||
.codehilite .ss { color: #a5d6ff } /* Literal.String.Symbol */
|
||||
.codehilite .bp { color: #c9d1d9 } /* Name.Builtin.Pseudo */
|
||||
.codehilite .fm { color: #d2a8ff; font-weight: bold } /* Name.Function.Magic */
|
||||
.codehilite .vc { color: #79c0ff } /* Name.Variable.Class */
|
||||
.codehilite .vg { color: #79c0ff } /* Name.Variable.Global */
|
||||
.codehilite .vi { color: #79c0ff } /* Name.Variable.Instance */
|
||||
.codehilite .vm { color: #79c0ff } /* Name.Variable.Magic */
|
||||
.codehilite .il { color: #a5d6ff } /* Literal.Number.Integer.Long */
|
||||
|
||||
.dark .codehilite .hll { background-color: #2C3B41 }
|
||||
.dark .codehilite .c { color: #79d618; font-style: italic } /* Comment */
|
||||
.dark .codehilite .err { color: #FF5370 } /* Error */
|
||||
.dark .codehilite .esc { color: #89DDFF } /* Escape */
|
||||
.dark .codehilite .g { color: #EEFFFF } /* Generic */
|
||||
.dark .codehilite .k { color: #BB80B3 } /* Keyword */
|
||||
.dark .codehilite .l { color: #C3E88D } /* Literal */
|
||||
.dark .codehilite .n { color: #EEFFFF } /* Name */
|
||||
.dark .codehilite .o { color: #89DDFF } /* Operator */
|
||||
.dark .codehilite .p { color: #89DDFF } /* Punctuation */
|
||||
.dark .codehilite .ch { color: #79d618; font-style: italic } /* Comment.Hashbang */
|
||||
.dark .codehilite .cm { color: #79d618; font-style: italic } /* Comment.Multiline */
|
||||
.dark .codehilite .cp { color: #79d618; font-style: italic } /* Comment.Preproc */
|
||||
.dark .codehilite .cpf { color: #79d618; font-style: italic } /* Comment.PreprocFile */
|
||||
.dark .codehilite .c1 { color: #79d618; font-style: italic } /* Comment.Single */
|
||||
.dark .codehilite .cs { color: #79d618; font-style: italic } /* Comment.Special */
|
||||
.dark .codehilite .gd { color: #FF5370 } /* Generic.Deleted */
|
||||
.dark .codehilite .ge { color: #89DDFF } /* Generic.Emph */
|
||||
.dark .codehilite .gr { color: #FF5370 } /* Generic.Error */
|
||||
.dark .codehilite .gh { color: #C3E88D } /* Generic.Heading */
|
||||
.dark .codehilite .gi { color: #C3E88D } /* Generic.Inserted */
|
||||
.dark .codehilite .go { color: #79d618 } /* Generic.Output */
|
||||
.dark .codehilite .gp { color: #FFCB6B } /* Generic.Prompt */
|
||||
.dark .codehilite .gs { color: #FF5370 } /* Generic.Strong */
|
||||
.dark .codehilite .gu { color: #89DDFF } /* Generic.Subheading */
|
||||
.dark .codehilite .gt { color: #FF5370 } /* Generic.Traceback */
|
||||
.dark .codehilite .kc { color: #89DDFF } /* Keyword.Constant */
|
||||
.dark .codehilite .kd { color: #BB80B3 } /* Keyword.Declaration */
|
||||
.dark .codehilite .kn { color: #89DDFF; font-style: italic } /* Keyword.Namespace */
|
||||
.dark .codehilite .kp { color: #89DDFF } /* Keyword.Pseudo */
|
||||
.dark .codehilite .kr { color: #BB80B3 } /* Keyword.Reserved */
|
||||
.dark .codehilite .kt { color: #BB80B3 } /* Keyword.Type */
|
||||
.dark .codehilite .ld { color: #C3E88D } /* Literal.Date */
|
||||
.dark .codehilite .m { color: #F78C6C } /* Literal.Number */
|
||||
.dark .codehilite .s { color: #C3E88D } /* Literal.String */
|
||||
.dark .codehilite .na { color: #BB80B3 } /* Name.Attribute */
|
||||
.dark .codehilite .nb { color: #82AAFF } /* Name.Builtin */
|
||||
.dark .codehilite .nc { color: #FFCB6B } /* Name.Class */
|
||||
.dark .codehilite .no { color: #EEFFFF } /* Name.Constant */
|
||||
.dark .codehilite .nd { color: #82AAFF } /* Name.Decorator */
|
||||
.dark .codehilite .ni { color: #89DDFF } /* Name.Entity */
|
||||
.dark .codehilite .ne { color: #FFCB6B } /* Name.Exception */
|
||||
.dark .codehilite .nf { color: #82AAFF } /* Name.Function */
|
||||
.dark .codehilite .nl { color: #82AAFF } /* Name.Label */
|
||||
.dark .codehilite .nn { color: #FFCB6B } /* Name.Namespace */
|
||||
.dark .codehilite .nx { color: #EEFFFF } /* Name.Other */
|
||||
.dark .codehilite .py { color: #FFCB6B } /* Name.Property */
|
||||
.dark .codehilite .nt { color: #FF5370 } /* Name.Tag */
|
||||
.dark .codehilite .nv { color: #89DDFF } /* Name.Variable */
|
||||
.dark .codehilite .ow { color: #89DDFF; font-style: italic } /* Operator.Word */
|
||||
.dark .codehilite .pm { color: #89DDFF } /* Punctuation.Marker */
|
||||
.dark .codehilite .w { color: #EEFFFF } /* Text.Whitespace */
|
||||
.dark .codehilite .mb { color: #F78C6C } /* Literal.Number.Bin */
|
||||
.dark .codehilite .mf { color: #F78C6C } /* Literal.Number.Float */
|
||||
.dark .codehilite .mh { color: #F78C6C } /* Literal.Number.Hex */
|
||||
.dark .codehilite .mi { color: #F78C6C } /* Literal.Number.Integer */
|
||||
.dark .codehilite .mo { color: #F78C6C } /* Literal.Number.Oct */
|
||||
.dark .codehilite .sa { color: #BB80B3 } /* Literal.String.Affix */
|
||||
.dark .codehilite .sb { color: #C3E88D } /* Literal.String.Backtick */
|
||||
.dark .codehilite .sc { color: #C3E88D } /* Literal.String.Char */
|
||||
.dark .codehilite .dl { color: #EEFFFF } /* Literal.String.Delimiter */
|
||||
.dark .codehilite .sd { color: #79d618; font-style: italic } /* Literal.String.Doc */
|
||||
.dark .codehilite .s2 { color: #C3E88D } /* Literal.String.Double */
|
||||
.dark .codehilite .se { color: #EEFFFF } /* Literal.String.Escape */
|
||||
.dark .codehilite .sh { color: #C3E88D } /* Literal.String.Heredoc */
|
||||
.dark .codehilite .si { color: #89DDFF } /* Literal.String.Interpol */
|
||||
.dark .codehilite .sx { color: #C3E88D } /* Literal.String.Other */
|
||||
.dark .codehilite .sr { color: #89DDFF } /* Literal.String.Regex */
|
||||
.dark .codehilite .s1 { color: #C3E88D } /* Literal.String.Single */
|
||||
.dark .codehilite .ss { color: #89DDFF } /* Literal.String.Symbol */
|
||||
.dark .codehilite .bp { color: #89DDFF } /* Name.Builtin.Pseudo */
|
||||
.dark .codehilite .fm { color: #82AAFF } /* Name.Function.Magic */
|
||||
.dark .codehilite .vc { color: #89DDFF } /* Name.Variable.Class */
|
||||
.dark .codehilite .vg { color: #89DDFF } /* Name.Variable.Global */
|
||||
.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */
|
||||
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
|
||||
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */
|
||||
@ -1,465 +0,0 @@
|
||||
|
||||
// custom javascript here
|
||||
|
||||
const MAX_HISTORY_LENGTH = 32;
|
||||
|
||||
var key_down_history = [];
|
||||
var currentIndex = -1;
|
||||
var user_input_ta;
|
||||
|
||||
var gradioContainer = null;
|
||||
var user_input_ta = null;
|
||||
var chat_txt = null;
|
||||
var userInfoDiv = null;
|
||||
var appTitleDiv = null;
|
||||
var chatbot = null;
|
||||
var chatbotWrap = null;
|
||||
var apSwitch = null;
|
||||
var messageBotDivs = null;
|
||||
var loginUserForm = null;
|
||||
var logginUser = null;
|
||||
|
||||
var userLogged = false;
|
||||
var usernameGotten = false;
|
||||
var historyLoaded = false;
|
||||
|
||||
var ga = document.getElementsByTagName("gradio-app");
|
||||
var targetNode = ga[0];
|
||||
var isInIframe = (window.self !== window.top);
|
||||
var language = navigator.language.slice(0,2);
|
||||
|
||||
var forView_i18n = {
|
||||
'zh': "仅供查看",
|
||||
'en': "For viewing only",
|
||||
'ja': "閲覧専用",
|
||||
'fr': "Pour consultation seulement",
|
||||
'es': "Solo para visualización",
|
||||
};
|
||||
|
||||
var deleteConfirm_i18n_pref = {
|
||||
'zh': "你真的要删除 ",
|
||||
'en': "Are you sure you want to delete ",
|
||||
'ja': "本当に ",
|
||||
};
|
||||
var deleteConfirm_i18n_suff = {
|
||||
'zh': " 吗?",
|
||||
'en': " ?",
|
||||
'ja': " を削除してもよろしいですか?",
|
||||
};
|
||||
var deleteConfirm_msg_pref = "Are you sure you want to delete ";
|
||||
var deleteConfirm_msg_suff = " ?";
|
||||
|
||||
// gradio 页面加载好了么??? 我能动你的元素了么??
|
||||
function gradioLoaded(mutations) {
|
||||
for (var i = 0; i < mutations.length; i++) {
|
||||
if (mutations[i].addedNodes.length) {
|
||||
loginUserForm = document.querySelector(".gradio-container > .main > .wrap > .panel > .form")
|
||||
gradioContainer = document.querySelector(".gradio-container");
|
||||
chat_txt = document.getElementById('chat_txt');
|
||||
userInfoDiv = document.getElementById("user_info");
|
||||
appTitleDiv = document.getElementById("app_title");
|
||||
chatbot = document.querySelector('#废弃');
|
||||
chatbotWrap = document.querySelector('#废弃 > .wrap');
|
||||
apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
|
||||
|
||||
if (loginUserForm) {
|
||||
localStorage.setItem("userLogged", true);
|
||||
userLogged = true;
|
||||
}
|
||||
|
||||
if (gradioContainer && apSwitch) { // gradioCainter 加载出来了没?
|
||||
adjustDarkMode();
|
||||
}
|
||||
if (chat_txt) { // chat_txt 加载出来了没?
|
||||
selectHistory();
|
||||
}
|
||||
if (userInfoDiv && appTitleDiv) { // userInfoDiv 和 appTitleDiv 加载出来了没?
|
||||
if (!usernameGotten) {
|
||||
getUserInfo();
|
||||
}
|
||||
setTimeout(showOrHideUserInfo(), 2000);
|
||||
}
|
||||
if (chatbot) { // chatbot 加载出来了没?
|
||||
setChatbotHeight();
|
||||
}
|
||||
if (chatbotWrap) {
|
||||
if (!historyLoaded) {
|
||||
loadHistoryHtml();
|
||||
}
|
||||
setChatbotScroll();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function webLocale() {
|
||||
// console.log("webLocale", language);
|
||||
if (forView_i18n.hasOwnProperty(language)) {
|
||||
var forView = forView_i18n[language];
|
||||
var forViewStyle = document.createElement('style');
|
||||
forViewStyle.innerHTML = '.wrap>.history-message>:last-child::after { content: "' + forView + '"!important; }';
|
||||
document.head.appendChild(forViewStyle);
|
||||
}
|
||||
if (deleteConfirm_i18n_pref.hasOwnProperty(language)) {
|
||||
deleteConfirm_msg_pref = deleteConfirm_i18n_pref[language];
|
||||
deleteConfirm_msg_suff = deleteConfirm_i18n_suff[language];
|
||||
}
|
||||
}
|
||||
|
||||
function showConfirmationDialog(a, file, c) {
|
||||
if (file != "") {
|
||||
var result = confirm(deleteConfirm_msg_pref + file + deleteConfirm_msg_suff);
|
||||
if (result) {
|
||||
return [a, file, c];
|
||||
}
|
||||
}
|
||||
return [a, "CANCELED", c];
|
||||
}
|
||||
|
||||
function selectHistory() {
|
||||
user_input_ta = chat_txt.querySelector("textarea");
|
||||
if (user_input_ta) {
|
||||
observer.disconnect(); // 停止监听
|
||||
// 在 textarea 上监听 keydown 事件
|
||||
user_input_ta.addEventListener("keydown", function (event) {
|
||||
var value = user_input_ta.value.trim();
|
||||
// 判断按下的是否为方向键
|
||||
if (event.code === 'ArrowUp' || event.code === 'ArrowDown') {
|
||||
// 如果按下的是方向键,且输入框中有内容,且历史记录中没有该内容,则不执行操作
|
||||
if (value && key_down_history.indexOf(value) === -1)
|
||||
return;
|
||||
// 对于需要响应的动作,阻止默认行为。
|
||||
event.preventDefault();
|
||||
var length = key_down_history.length;
|
||||
if (length === 0) {
|
||||
currentIndex = -1; // 如果历史记录为空,直接将当前选中的记录重置
|
||||
return;
|
||||
}
|
||||
if (currentIndex === -1) {
|
||||
currentIndex = length;
|
||||
}
|
||||
if (event.code === 'ArrowUp' && currentIndex > 0) {
|
||||
currentIndex--;
|
||||
user_input_ta.value = key_down_history[currentIndex];
|
||||
} else if (event.code === 'ArrowDown' && currentIndex < length - 1) {
|
||||
currentIndex++;
|
||||
user_input_ta.value = key_down_history[currentIndex];
|
||||
}
|
||||
user_input_ta.selectionStart = user_input_ta.value.length;
|
||||
user_input_ta.selectionEnd = user_input_ta.value.length;
|
||||
const input_event = new InputEvent("input", { bubbles: true, cancelable: true });
|
||||
user_input_ta.dispatchEvent(input_event);
|
||||
} else if (event.code === "Enter") {
|
||||
if (value) {
|
||||
currentIndex = -1;
|
||||
if (key_down_history.indexOf(value) === -1) {
|
||||
key_down_history.push(value);
|
||||
if (key_down_history.length > MAX_HISTORY_LENGTH) {
|
||||
key_down_history.shift();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
var username = null;
|
||||
function getUserInfo() {
|
||||
if (usernameGotten) {
|
||||
return;
|
||||
}
|
||||
userLogged = localStorage.getItem('userLogged');
|
||||
if (userLogged) {
|
||||
username = userInfoDiv.innerText;
|
||||
if (username) {
|
||||
if (username.includes("getting user info…")) {
|
||||
setTimeout(getUserInfo, 500);
|
||||
return;
|
||||
} else if (username === " ") {
|
||||
localStorage.removeItem("username");
|
||||
localStorage.removeItem("userLogged")
|
||||
userLogged = false;
|
||||
usernameGotten = true;
|
||||
return;
|
||||
} else {
|
||||
username = username.match(/User:\s*(.*)/)[1] || username;
|
||||
localStorage.setItem("username", username);
|
||||
usernameGotten = true;
|
||||
clearHistoryHtml();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function toggleUserInfoVisibility(shouldHide) {
|
||||
if (userInfoDiv) {
|
||||
if (shouldHide) {
|
||||
userInfoDiv.classList.add("hideK");
|
||||
} else {
|
||||
userInfoDiv.classList.remove("hideK");
|
||||
}
|
||||
}
|
||||
}
|
||||
function showOrHideUserInfo() {
|
||||
var sendBtn = document.getElementById("submit_btn");
|
||||
|
||||
// Bind mouse/touch events to show/hide user info
|
||||
appTitleDiv.addEventListener("mouseenter", function () {
|
||||
toggleUserInfoVisibility(false);
|
||||
});
|
||||
userInfoDiv.addEventListener("mouseenter", function () {
|
||||
toggleUserInfoVisibility(false);
|
||||
});
|
||||
sendBtn.addEventListener("mouseenter", function () {
|
||||
toggleUserInfoVisibility(false);
|
||||
});
|
||||
|
||||
appTitleDiv.addEventListener("mouseleave", function () {
|
||||
toggleUserInfoVisibility(true);
|
||||
});
|
||||
userInfoDiv.addEventListener("mouseleave", function () {
|
||||
toggleUserInfoVisibility(true);
|
||||
});
|
||||
sendBtn.addEventListener("mouseleave", function () {
|
||||
toggleUserInfoVisibility(true);
|
||||
});
|
||||
|
||||
appTitleDiv.ontouchstart = function () {
|
||||
toggleUserInfoVisibility(false);
|
||||
};
|
||||
userInfoDiv.ontouchstart = function () {
|
||||
toggleUserInfoVisibility(false);
|
||||
};
|
||||
sendBtn.ontouchstart = function () {
|
||||
toggleUserInfoVisibility(false);
|
||||
};
|
||||
|
||||
appTitleDiv.ontouchend = function () {
|
||||
setTimeout(function () {
|
||||
toggleUserInfoVisibility(true);
|
||||
}, 3000);
|
||||
};
|
||||
userInfoDiv.ontouchend = function () {
|
||||
setTimeout(function () {
|
||||
toggleUserInfoVisibility(true);
|
||||
}, 3000);
|
||||
};
|
||||
sendBtn.ontouchend = function () {
|
||||
setTimeout(function () {
|
||||
toggleUserInfoVisibility(true);
|
||||
}, 3000); // Delay 1 second to hide user info
|
||||
};
|
||||
|
||||
// Hide user info after 2 second
|
||||
setTimeout(function () {
|
||||
toggleUserInfoVisibility(true);
|
||||
}, 2000);
|
||||
}
|
||||
|
||||
function toggleDarkMode(isEnabled) {
|
||||
if (isEnabled) {
|
||||
document.body.classList.add("dark");
|
||||
document.body.style.setProperty("background-color", "var(--neutral-950)", "important");
|
||||
} else {
|
||||
document.body.classList.remove("dark");
|
||||
document.body.style.backgroundColor = "";
|
||||
}
|
||||
}
|
||||
function adjustDarkMode() {
|
||||
const darkModeQuery = window.matchMedia("(prefers-color-scheme: dark)");
|
||||
|
||||
// 根据当前颜色模式设置初始状态
|
||||
apSwitch.checked = darkModeQuery.matches;
|
||||
toggleDarkMode(darkModeQuery.matches);
|
||||
// 监听颜色模式变化
|
||||
darkModeQuery.addEventListener("change", (e) => {
|
||||
apSwitch.checked = e.matches;
|
||||
toggleDarkMode(e.matches);
|
||||
});
|
||||
// apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
|
||||
apSwitch.addEventListener("change", (e) => {
|
||||
toggleDarkMode(e.target.checked);
|
||||
});
|
||||
}
|
||||
|
||||
function setChatbotHeight() {
|
||||
const screenWidth = window.innerWidth;
|
||||
const statusDisplay = document.querySelector('#status_display');
|
||||
const statusDisplayHeight = statusDisplay ? statusDisplay.offsetHeight : 0;
|
||||
const wrap = chatbot.querySelector('.wrap');
|
||||
const vh = window.innerHeight * 0.01;
|
||||
document.documentElement.style.setProperty('--vh', `${vh}px`);
|
||||
if (isInIframe) {
|
||||
chatbot.style.height = `700px`;
|
||||
wrap.style.maxHeight = `calc(700px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`
|
||||
} else {
|
||||
if (screenWidth <= 320) {
|
||||
chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px)`;
|
||||
wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
|
||||
} else if (screenWidth <= 499) {
|
||||
chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px)`;
|
||||
wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
|
||||
} else {
|
||||
chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px)`;
|
||||
wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
|
||||
}
|
||||
}
|
||||
}
|
||||
function setChatbotScroll() {
|
||||
var scrollHeight = chatbotWrap.scrollHeight;
|
||||
chatbotWrap.scrollTo(0,scrollHeight)
|
||||
}
|
||||
var rangeInputs = null;
|
||||
var numberInputs = null;
|
||||
function setSlider() {
|
||||
rangeInputs = document.querySelectorAll('input[type="range"]');
|
||||
numberInputs = document.querySelectorAll('input[type="number"]')
|
||||
setSliderRange();
|
||||
rangeInputs.forEach(rangeInput => {
|
||||
rangeInput.addEventListener('input', setSliderRange);
|
||||
});
|
||||
numberInputs.forEach(numberInput => {
|
||||
numberInput.addEventListener('input', setSliderRange);
|
||||
})
|
||||
}
|
||||
function setSliderRange() {
|
||||
var range = document.querySelectorAll('input[type="range"]');
|
||||
range.forEach(range => {
|
||||
range.style.backgroundSize = (range.value - range.min) / (range.max - range.min) * 100 + '% 100%';
|
||||
});
|
||||
}
|
||||
|
||||
function addChuanhuButton(botElement) {
|
||||
var rawMessage = null;
|
||||
var mdMessage = null;
|
||||
rawMessage = botElement.querySelector('.raw-message');
|
||||
mdMessage = botElement.querySelector('.md-message');
|
||||
if (!rawMessage) {
|
||||
var buttons = botElement.querySelectorAll('button.chuanhu-btn');
|
||||
for (var i = 0; i < buttons.length; i++) {
|
||||
buttons[i].parentNode.removeChild(buttons[i]);
|
||||
}
|
||||
return;
|
||||
}
|
||||
var copyButton = null;
|
||||
var toggleButton = null;
|
||||
copyButton = botElement.querySelector('button.copy-bot-btn');
|
||||
toggleButton = botElement.querySelector('button.toggle-md-btn');
|
||||
if (copyButton) copyButton.remove();
|
||||
if (toggleButton) toggleButton.remove();
|
||||
|
||||
// Copy bot button
|
||||
var copyButton = document.createElement('button');
|
||||
copyButton.classList.add('chuanhu-btn');
|
||||
copyButton.classList.add('copy-bot-btn');
|
||||
copyButton.setAttribute('aria-label', 'Copy');
|
||||
copyButton.innerHTML = copyIcon;
|
||||
copyButton.addEventListener('click', () => {
|
||||
const textToCopy = rawMessage.innerText;
|
||||
navigator.clipboard
|
||||
.writeText(textToCopy)
|
||||
.then(() => {
|
||||
copyButton.innerHTML = copiedIcon;
|
||||
setTimeout(() => {
|
||||
copyButton.innerHTML = copyIcon;
|
||||
}, 1500);
|
||||
})
|
||||
.catch(() => {
|
||||
console.error("copy failed");
|
||||
});
|
||||
});
|
||||
botElement.appendChild(copyButton);
|
||||
|
||||
// Toggle button
|
||||
var toggleButton = document.createElement('button');
|
||||
toggleButton.classList.add('chuanhu-btn');
|
||||
toggleButton.classList.add('toggle-md-btn');
|
||||
toggleButton.setAttribute('aria-label', 'Toggle');
|
||||
var renderMarkdown = mdMessage.classList.contains('hideM');
|
||||
toggleButton.innerHTML = renderMarkdown ? mdIcon : rawIcon;
|
||||
toggleButton.addEventListener('click', () => {
|
||||
renderMarkdown = mdMessage.classList.contains('hideM');
|
||||
if (renderMarkdown){
|
||||
renderMarkdownText(botElement);
|
||||
toggleButton.innerHTML=rawIcon;
|
||||
} else {
|
||||
removeMarkdownText(botElement);
|
||||
toggleButton.innerHTML=mdIcon;
|
||||
}
|
||||
});
|
||||
botElement.insertBefore(toggleButton, copyButton);
|
||||
}
|
||||
|
||||
function renderMarkdownText(message) {
|
||||
var mdDiv = message.querySelector('.md-message');
|
||||
if (mdDiv) mdDiv.classList.remove('hideM');
|
||||
var rawDiv = message.querySelector('.raw-message');
|
||||
if (rawDiv) rawDiv.classList.add('hideM');
|
||||
}
|
||||
function removeMarkdownText(message) {
|
||||
var rawDiv = message.querySelector('.raw-message');
|
||||
if (rawDiv) rawDiv.classList.remove('hideM');
|
||||
var mdDiv = message.querySelector('.md-message');
|
||||
if (mdDiv) mdDiv.classList.add('hideM');
|
||||
}
|
||||
|
||||
let timeoutId;
|
||||
let isThrottled = false;
|
||||
var mmutation
|
||||
// 监听所有元素中 bot message 的变化,为 bot 消息添加复制按钮。
|
||||
var mObserver = new MutationObserver(function (mutationsList) {
|
||||
for (mmutation of mutationsList) {
|
||||
if (mmutation.type === 'childList') {
|
||||
for (var node of mmutation.addedNodes) {
|
||||
if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') {
|
||||
saveHistoryHtml();
|
||||
document.querySelectorAll('#废弃>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
|
||||
}
|
||||
if (node.tagName === 'INPUT' && node.getAttribute('type') === 'range') {
|
||||
setSlider();
|
||||
}
|
||||
}
|
||||
for (var node of mmutation.removedNodes) {
|
||||
if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') {
|
||||
saveHistoryHtml();
|
||||
document.querySelectorAll('#废弃>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
|
||||
}
|
||||
}
|
||||
} else if (mmutation.type === 'attributes') {
|
||||
if (mmutation.target.nodeType === 1 && mmutation.target.classList.contains('message') && mmutation.target.getAttribute('data-testid') === 'bot') {
|
||||
if (isThrottled) break; // 为了防止重复不断疯狂渲染,加上等待_(:з」∠)_
|
||||
isThrottled = true;
|
||||
clearTimeout(timeoutId);
|
||||
timeoutId = setTimeout(() => {
|
||||
isThrottled = false;
|
||||
document.querySelectorAll('#废弃>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
|
||||
saveHistoryHtml();
|
||||
}, 500);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
mObserver.observe(document.documentElement, { attributes: true, childList: true, subtree: true });
|
||||
|
||||
|
||||
// 监视页面内部 DOM 变动
|
||||
var observer = new MutationObserver(function (mutations) {
|
||||
gradioLoaded(mutations);
|
||||
});
|
||||
observer.observe(targetNode, { childList: true, subtree: true });
|
||||
|
||||
// 监视页面变化
|
||||
window.addEventListener("DOMContentLoaded", function () {
|
||||
isInIframe = (window.self !== window.top);
|
||||
historyLoaded = false;
|
||||
});
|
||||
window.addEventListener('resize', setChatbotHeight);
|
||||
window.addEventListener('scroll', setChatbotHeight);
|
||||
window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode);
|
||||
|
||||
// button svg code
|
||||
const copyIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
|
||||
const copiedIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
|
||||
const mdIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="1" viewBox="0 0 14 18" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><g transform-origin="center" transform="scale(0.85)"><path d="M1.5,0 L12.5,0 C13.3284271,-1.52179594e-16 14,0.671572875 14,1.5 L14,16.5 C14,17.3284271 13.3284271,18 12.5,18 L1.5,18 C0.671572875,18 1.01453063e-16,17.3284271 0,16.5 L0,1.5 C-1.01453063e-16,0.671572875 0.671572875,1.52179594e-16 1.5,0 Z" stroke-width="1.8"></path><line x1="3.5" y1="3.5" x2="10.5" y2="3.5"></line><line x1="3.5" y1="6.5" x2="8" y2="6.5"></line></g><path d="M4,9 L10,9 C10.5522847,9 11,9.44771525 11,10 L11,13.5 C11,14.0522847 10.5522847,14.5 10,14.5 L4,14.5 C3.44771525,14.5 3,14.0522847 3,13.5 L3,10 C3,9.44771525 3.44771525,9 4,9 Z" stroke="none" fill="currentColor"></path></svg></span>';
|
||||
const rawIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="1.8" viewBox="0 0 18 14" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><g transform-origin="center" transform="scale(0.85)"><polyline points="4 3 0 7 4 11"></polyline><polyline points="14 3 18 7 14 11"></polyline><line x1="12" y1="0" x2="6" y2="14"></line></g></svg></span>';
|
||||
@ -1,2 +0,0 @@
|
||||
|
||||
// external javascript here
|
||||
@ -1,8 +0,0 @@
|
||||
<div style="display: flex; justify-content: space-between;">
|
||||
<span>
|
||||
<label class="apSwitch" for="checkbox">
|
||||
<input type="checkbox" id="checkbox">
|
||||
<div class="apSlider"></div>
|
||||
</label>
|
||||
</span>
|
||||
</div>
|
||||
@ -1,9 +0,0 @@
|
||||
<b>{label}</b>
|
||||
<div class="progress-bar">
|
||||
<div class="progress" style="width: {usage_percent}%;">
|
||||
<span class="progress-text">{usage_percent}%</span>
|
||||
</div>
|
||||
</div>
|
||||
<div style="display: flex; justify-content: space-between;">
|
||||
<span>${rounded_usage}</span><span>${usage_limit}</span>
|
||||
</div>
|
||||
@ -1 +0,0 @@
|
||||
<div class="versions">{versions}</div>
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 18 KiB |
@ -49,7 +49,7 @@ def markdown_convertion(txt):
|
||||
"""
|
||||
将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
|
||||
"""
|
||||
pre = '<div class="md-message">'
|
||||
pre = '<div class="markdown-body">'
|
||||
suf = '</div>'
|
||||
if txt.startswith(pre) and txt.endswith(suf):
|
||||
# print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
|
||||
|
||||
@ -265,7 +265,7 @@
|
||||
"例如chatglm&gpt-3.5-turbo&api2d-gpt-4": "e.g. chatglm&gpt-3.5-turbo&api2d-gpt-4",
|
||||
"先切换模型到openai或api2d": "Switch the model to openai or api2d first",
|
||||
"在这里输入分辨率": "Enter the resolution here",
|
||||
"如'256x256', '512x512', '1024x1024'": "e.g. '256x256', '512x512', '1024x1024'",
|
||||
"如256x256": "e.g. 256x256",
|
||||
"默认": "Default",
|
||||
"建议您复制一个config_private.py放自己的秘密": "We suggest you to copy a config_private.py file to keep your secrets, such as API and proxy URLs, from being accidentally uploaded to Github and seen by others.",
|
||||
"如API和代理网址": "Such as API and proxy URLs",
|
||||
@ -1667,5 +1667,294 @@
|
||||
"段音频的主要内容": "The main content of the segment audio is",
|
||||
"z$ 分别是空间直角坐标系中的三个坐标": "z$, respectively, are the three coordinates in the spatial rectangular coordinate system",
|
||||
"这个是怎么识别的呢我也不清楚": "I'm not sure how this is recognized",
|
||||
"从现在起": "From now on"
|
||||
"从现在起": "From now on",
|
||||
"连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion",
|
||||
"联网的ChatGPT_bing版": "OnlineChatGPT_BingEdition",
|
||||
"Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
|
||||
"Langchain知识库": "LangchainKnowledgeBase",
|
||||
"Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison",
|
||||
"Latex输出PDF结果": "OutputPDFFromLatex",
|
||||
"Latex翻译中文并重新编译PDF": "TranslateChineseToEnglishInLatexAndRecompilePDF",
|
||||
"sprint亮靛": "SprintIndigo",
|
||||
"寻找Latex主文件": "FindLatexMainFile",
|
||||
"专业词汇声明": "ProfessionalTerminologyDeclaration",
|
||||
"Latex精细分解与转化": "DecomposeAndConvertLatex",
|
||||
"编译Latex": "CompileLatex",
|
||||
"如果您是论文原作者": "If you are the original author of the paper",
|
||||
"正在编译对比PDF": "Compiling the comparison PDF",
|
||||
"将 \\include 命令转换为 \\input 命令": "Converting the \\include command to the \\input command",
|
||||
"取评分最高者返回": "Returning the highest-rated one",
|
||||
"不要修改!! 高危设置!通过修改此设置": "Do not modify!! High-risk setting! By modifying this setting",
|
||||
"Tex源文件缺失!": "Tex source file is missing!",
|
||||
"6.25 加入判定latex模板的代码": "Added code to determine the latex template on June 25",
|
||||
"正在精细切分latex文件": "Finely splitting the latex file",
|
||||
"获取response失败": "Failed to get response",
|
||||
"手动指定语言": "Manually specify the language",
|
||||
"输入arxivID": "Enter arxivID",
|
||||
"对输入的word文档进行摘要生成": "Generate a summary of the input word document",
|
||||
"将指定目录下的PDF文件从英文翻译成中文": "Translate PDF files from English to Chinese in the specified directory",
|
||||
"如果分析错误": "If the analysis is incorrect",
|
||||
"尝试第": "Try the",
|
||||
"用户填3": "User fills in 3",
|
||||
"请在此处追加更细致的矫错指令": "Please append more detailed correction instructions here",
|
||||
"为了防止大语言模型的意外谬误产生扩散影响": "To prevent the accidental spread of errors in large language models",
|
||||
"前面是中文冒号": "The colon before is in Chinese",
|
||||
"内含已经翻译的Tex文档": "Contains a Tex document that has been translated",
|
||||
"成功啦": "Success!",
|
||||
"刷新页面即可以退出UpdateKnowledgeArchive模式": "Refresh the page to exit UpdateKnowledgeArchive mode",
|
||||
"或者不在环境变量PATH中": "Or not in the environment variable PATH",
|
||||
"--读取文件": "--Read the file",
|
||||
"才能继续下面的步骤": "To continue with the next steps",
|
||||
"代理数据解析失败": "Proxy data parsing failed",
|
||||
"详见项目主README.md": "See the main README.md of the project for details",
|
||||
"临时存储用于调试": "Temporarily stored for debugging",
|
||||
"屏蔽空行和太短的句子": "Filter out empty lines and sentences that are too short",
|
||||
"gpt 多线程请求": "GPT multi-threaded request",
|
||||
"编译已经开始": "Compilation has started",
|
||||
"无法找到一个主Tex文件": "Cannot find a main Tex file",
|
||||
"修复括号": "Fix parentheses",
|
||||
"请您不要删除或修改这行警告": "Please do not delete or modify this warning",
|
||||
"请登录OpenAI查看详情 https": "Please log in to OpenAI to view details at https",
|
||||
"调用函数": "Call a function",
|
||||
"请查看终端的输出或耐心等待": "Please check the output in the terminal or wait patiently",
|
||||
"LatexEnglishCorrection+高亮修正位置": "Latex English correction + highlight correction position",
|
||||
"行": "line",
|
||||
"Newbing 请求失败": "Newbing request failed",
|
||||
"转化PDF编译是否成功": "Check if the conversion to PDF and compilation were successful",
|
||||
"建议更换代理协议": "Recommend changing the proxy protocol",
|
||||
"========================================= 插件主程序1 =====================================================": "========================================= Plugin Main Program 1 =====================================================",
|
||||
"终端": "terminal",
|
||||
"请先上传文件素材": "Please upload file materials first",
|
||||
"前面是中文逗号": "There is a Chinese comma in front",
|
||||
"请尝试把以下指令复制到高级参数区": "Please try copying the following instructions to the advanced parameters section",
|
||||
"翻译-": "Translation -",
|
||||
"请耐心等待": "Please be patient",
|
||||
"将前后断行符脱离": "Remove line breaks before and after",
|
||||
"json等": "JSON, etc.",
|
||||
"生成中文PDF": "Generate Chinese PDF",
|
||||
"用红色标注处保留区": "Use red color to highlight the reserved area",
|
||||
"对比PDF编译是否成功": "Compare if the PDF compilation was successful",
|
||||
"回答完问题后": "After answering the question",
|
||||
"其他操作系统表现未知": "Unknown performance on other operating systems",
|
||||
"-构建知识库": "Build knowledge base",
|
||||
"还原原文": "Restore original text",
|
||||
"或者重启之后再度尝试": "Or try again after restarting",
|
||||
"免费": "Free",
|
||||
"仅在Windows系统进行了测试": "Tested only on Windows system",
|
||||
"欢迎加REAME中的QQ联系开发者": "Feel free to contact the developer via QQ in REAME",
|
||||
"当前知识库内的有效文件": "Valid files in the current knowledge base",
|
||||
"您可以到Github Issue区": "You can go to the Github Issue area",
|
||||
"刷新Gradio前端界面": "Refresh the Gradio frontend interface",
|
||||
"吸收title与作者以上的部分": "Include the title and the above part of the author",
|
||||
"给出一些判定模板文档的词作为扣分项": "Provide some words in the template document as deduction items",
|
||||
"--读取参数": "-- Read parameters",
|
||||
"然后进行问答": "And then perform question-answering",
|
||||
"根据自然语言执行插件命令": "Execute plugin commands based on natural language",
|
||||
"*{\\scriptsize\\textbf{警告": "*{\\scriptsize\\textbf{Warning",
|
||||
"但请查收结果": "But please check the results",
|
||||
"翻译内容可靠性无保障": "No guarantee of translation accuracy",
|
||||
"寻找主文件": "Find the main file",
|
||||
"消耗时间的函数": "Time-consuming function",
|
||||
"当前语言模型温度设定": "Current language model temperature setting",
|
||||
"这需要一段时间计算": "This requires some time to calculate",
|
||||
"为啥chatgpt会把cite里面的逗号换成中文逗号呀": "Why does ChatGPT change commas inside 'cite' to Chinese commas?",
|
||||
"发现已经存在翻译好的PDF文档": "Found an already translated PDF document",
|
||||
"待提取的知识库名称id": "Knowledge base name ID to be extracted",
|
||||
"文本碎片重组为完整的tex片段": "Reassemble text fragments into complete tex fragments",
|
||||
"注意事项": "Notes",
|
||||
"参数说明": "Parameter description",
|
||||
"或代理节点": "Or proxy node",
|
||||
"构建知识库": "Building knowledge base",
|
||||
"报错信息如下. 如果是与网络相关的问题": "Error message as follows. If it is related to network issues",
|
||||
"功能描述": "Function description",
|
||||
"禁止移除或修改此警告": "Removal or modification of this warning is prohibited",
|
||||
"Arixv翻译": "Arixv translation",
|
||||
"读取优先级": "Read priority",
|
||||
"包含documentclass关键字": "Contains the documentclass keyword",
|
||||
"根据文本使用GPT模型生成相应的图像": "Generate corresponding images using GPT model based on the text",
|
||||
"图像生成所用到的提示文本": "Prompt text used for image generation",
|
||||
"Your account is not active. OpenAI以账户失效为由": "Your account is not active. OpenAI states that it is due to account expiration",
|
||||
"快捷的调试函数": "Convenient debugging function",
|
||||
"在多Tex文档中": "In multiple Tex documents",
|
||||
"因此选择GenerateImage函数": "Therefore, choose the GenerateImage function",
|
||||
"当前工作路径为": "The current working directory is",
|
||||
"实际得到格式": "Obtained format in reality",
|
||||
"这段代码定义了一个名为TempProxy的空上下文管理器": "This code defines an empty context manager named TempProxy",
|
||||
"吸收其他杂项": "Absorb other miscellaneous items",
|
||||
"请输入要翻译成哪种语言": "Please enter which language to translate into",
|
||||
"的单词": "of the word",
|
||||
"正在尝试自动安装": "Attempting automatic installation",
|
||||
"如果有必要": "If necessary",
|
||||
"开始下载": "Start downloading",
|
||||
"项目Github地址 \\url{https": "Project GitHub address \\url{https",
|
||||
"将根据报错信息修正tex源文件并重试": "The Tex source file will be corrected and retried based on the error message",
|
||||
"发送至azure openai api": "Send to Azure OpenAI API",
|
||||
"吸收匿名公式": "Absorb anonymous formulas",
|
||||
"用该压缩包+ConversationHistoryArchive进行反馈": "Provide feedback using the compressed package + ConversationHistoryArchive",
|
||||
"需要特殊依赖": "Requires special dependencies",
|
||||
"还原部分原文": "Restore part of the original text",
|
||||
"构建完成": "Build completed",
|
||||
"解析arxiv网址失败": "Failed to parse arXiv URL",
|
||||
"输入问题后点击该插件": "Click the plugin after entering the question",
|
||||
"请求子进程": "Requesting subprocess",
|
||||
"请务必用 pip install -r requirements.txt 指令安装依赖": "Please make sure to install the dependencies using the 'pip install -r requirements.txt' command",
|
||||
"如果程序停顿5分钟以上": "If the program pauses for more than 5 minutes",
|
||||
"转化PDF编译已经成功": "Conversion to PDF compilation was successful",
|
||||
"虽然PDF生成失败了": "Although PDF generation failed",
|
||||
"分析上述回答": "Analyze the above answer",
|
||||
"吸收在42行以内的begin-end组合": "Absorb the begin-end combination within 42 lines",
|
||||
"推荐http": "Recommend http",
|
||||
"Latex没有安装": "Latex is not installed",
|
||||
"用latex编译为PDF对修正处做高亮": "Compile to PDF using LaTeX and highlight the corrections",
|
||||
"reverse 操作必须放在最后": "'reverse' operation must be placed at the end",
|
||||
"AZURE OPENAI API拒绝了请求": "AZURE OPENAI API rejected the request",
|
||||
"该项目的Latex主文件是": "The main LaTeX file of this project is",
|
||||
"You are associated with a deactivated account. OpenAI以账户失效为由": "You are associated with a deactivated account. OpenAI considers it as an account expiration",
|
||||
"它*必须*被包含在AVAIL_LLM_MODELS列表中": "It *must* be included in the AVAIL_LLM_MODELS list",
|
||||
"未知指令": "Unknown command",
|
||||
"尝试执行Latex指令失败": "Failed to execute the LaTeX command",
|
||||
"摘要生成后的文档路径": "Path of the document after summary generation",
|
||||
"GPT结果已输出": "GPT result has been outputted",
|
||||
"使用Newbing": "Using Newbing",
|
||||
"其他模型转化效果未知": "Unknown conversion effect of other models",
|
||||
"P.S. 但愿没人把latex模板放在里面传进来": "P.S. Hopefully, no one passes a LaTeX template in it",
|
||||
"定位主Latex文件": "Locate the main LaTeX file",
|
||||
"后面是英文冒号": "English colon follows",
|
||||
"文档越长耗时越长": "The longer the document, the longer it takes.",
|
||||
"压缩包": "Compressed file",
|
||||
"但通常不会出现在正文": "But usually does not appear in the body.",
|
||||
"正在预热文本向量化模组": "Preheating text vectorization module",
|
||||
"5刀": "5 dollars",
|
||||
"提问吧! 但注意": "Ask questions! But be careful",
|
||||
"发送至AZURE OPENAI API": "Send to AZURE OPENAI API",
|
||||
"请仔细鉴别并以原文为准": "Please carefully verify and refer to the original text",
|
||||
"如果需要使用AZURE 详情请见额外文档 docs\\use_azure.md": "If you need to use AZURE, please refer to the additional document docs\\use_azure.md for details",
|
||||
"使用正则表达式查找半行注释": "Use regular expressions to find inline comments",
|
||||
"只有第二步成功": "Only the second step is successful",
|
||||
"P.S. 顺便把CTEX塞进去以支持中文": "P.S. By the way, include CTEX to support Chinese",
|
||||
"安装方法https": "Installation method: https",
|
||||
"则跳过GPT请求环节": "Then skip the GPT request process",
|
||||
"请切换至“UpdateKnowledgeArchive”插件进行知识库访问": "Please switch to the 'UpdateKnowledgeArchive' plugin for knowledge base access",
|
||||
"=================================== 工具函数 ===============================================": "=================================== Utility functions ===============================================",
|
||||
"填入azure openai api的密钥": "Fill in the Azure OpenAI API key",
|
||||
"上传Latex压缩包": "Upload LaTeX compressed file",
|
||||
"远程云服务器部署": "Deploy to remote cloud server",
|
||||
"用黑色标注转换区": "Use black color to annotate the conversion area",
|
||||
"音频文件的路径": "Path to the audio file",
|
||||
"必须包含documentclass": "Must include documentclass",
|
||||
"再列出用户可能提出的三个问题": "List three more questions that the user might ask",
|
||||
"根据需要切换prompt": "Switch the prompt as needed",
|
||||
"将文件复制一份到下载区": "Make a copy of the file in the download area",
|
||||
"次编译": "Second compilation",
|
||||
"Latex文件融合完成": "LaTeX file merging completed",
|
||||
"返回": "Return",
|
||||
"后面是英文逗号": "Comma after this",
|
||||
"对不同latex源文件扣分": "Deduct points for different LaTeX source files",
|
||||
"失败啦": "Failed",
|
||||
"编译BibTex": "Compile BibTeX",
|
||||
"Linux下必须使用Docker安装": "Must install using Docker on Linux",
|
||||
"报错信息": "Error message",
|
||||
"删除或修改歧义文件": "Delete or modify ambiguous files",
|
||||
"-预热文本向量化模组": "- Preheating text vectorization module",
|
||||
"将每次对话记录写入Markdown格式的文件中": "Write each conversation record into a file in Markdown format",
|
||||
"其他类型文献转化效果未知": "Unknown conversion effect for other types of literature",
|
||||
"获取线程锁": "Acquire thread lock",
|
||||
"使用英文": "Use English",
|
||||
"如果存在调试缓存文件": "If there is a debug cache file",
|
||||
"您需要首先调用构建知识库": "You need to call the knowledge base building first",
|
||||
"原始PDF编译是否成功": "Whether the original PDF compilation is successful",
|
||||
"生成 azure openai api请求": "Generate Azure OpenAI API requests",
|
||||
"正在编译PDF": "Compiling PDF",
|
||||
"仅调试": "Debug only",
|
||||
"========================================= 插件主程序2 =====================================================": "========================================= Plugin Main Program 2 =====================================================",
|
||||
"多线程翻译开始": "Multithreaded translation begins",
|
||||
"出问题了": "There is a problem",
|
||||
"版权归原文作者所有": "Copyright belongs to the original author",
|
||||
"当前大语言模型": "Current large language model",
|
||||
"目前对机器学习类文献转化效果最好": "Currently, the best conversion effect for machine learning literature",
|
||||
"这个paper有个input命令文件名大小写错误!": "This paper has an input command with a filename case error!",
|
||||
"期望格式例如": "Expected format, for example",
|
||||
"解决部分词汇翻译不准确的问题": "Resolve the issue of inaccurate translation for some terms",
|
||||
"待注入的知识库名称id": "Name/ID of the knowledge base to be injected",
|
||||
"精细切分latex文件": "Fine-grained segmentation of LaTeX files",
|
||||
"永远给定None": "Always given None",
|
||||
"work_folder = Latex预处理": "work_folder = LaTeX preprocessing",
|
||||
"请直接去该路径下取回翻译结果": "Please directly go to the path to retrieve the translation results",
|
||||
"寻找主tex文件": "Finding the main .tex file",
|
||||
"模型参数": "Model parameters",
|
||||
"返回找到的第一个": "Return the first one found",
|
||||
"编译转化后的PDF": "Compile the converted PDF",
|
||||
"\\SEAFILE_LOCALŅ03047\\我的资料库\\music\\Akie秋绘-未来轮廓.mp3": "\\SEAFILE_LOCALŅ03047\\My Library\\music\\Akie秋绘-未来轮廓.mp3",
|
||||
"拆分过长的latex片段": "Splitting overly long LaTeX fragments",
|
||||
"没有找到任何可读取文件": "No readable files found",
|
||||
"暗色模式 / 亮色模式": "Dark mode / Light mode",
|
||||
"检测到arxiv文档连接": "Detected arXiv document link",
|
||||
"此插件Windows支持最佳": "This plugin has best support for Windows",
|
||||
"from crazy_functions.虚空终端 import 终端": "from crazy_functions.null_terminal import Terminal",
|
||||
"本地论文翻译": "Local paper translation",
|
||||
"输出html调试文件": "Output HTML debugging file",
|
||||
"以下所有配置也都支持利用环境变量覆写": "All the following configurations can also be overridden using environment variables",
|
||||
"PDF文件所在的路径": "Path of the PDF file",
|
||||
"也是可读的": "It is also readable",
|
||||
"将消耗较长时间下载中文向量化模型": "Downloading Chinese vectorization model will take a long time",
|
||||
"环境变量配置格式见docker-compose.yml": "See docker-compose.yml for the format of environment variable configuration",
|
||||
"编译文献交叉引用": "Compile bibliographic cross-references",
|
||||
"默认为default": "Default is 'default'",
|
||||
"或者使用此插件继续上传更多文件": "Or use this plugin to continue uploading more files",
|
||||
"该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成": "This PDF is generated by the GPT-Academic open-source project using a large language model + LaTeX translation plugin",
|
||||
"使用latexdiff生成论文转化前后对比": "Use latexdiff to generate before and after comparison of paper transformation",
|
||||
"正在编译PDF文档": "Compiling PDF document",
|
||||
"读取config.py文件中关于AZURE OPENAI API的信息": "Read the information about AZURE OPENAI API from the config.py file",
|
||||
"配置教程&视频教程": "Configuration tutorial & video tutorial",
|
||||
"临时地启动代理网络": "Temporarily start proxy network",
|
||||
"临时地激活代理网络": "Temporarily activate proxy network",
|
||||
"功能尚不稳定": "Functionality is unstable",
|
||||
"默认为Chinese": "Default is Chinese",
|
||||
"请查收结果": "Please check the results",
|
||||
"将 chatglm 直接对齐到 chatglm2": "Align chatglm directly to chatglm2",
|
||||
"中读取数据构建知识库": "Build a knowledge base by reading data in",
|
||||
"用于给一小段代码上代理": "Used to proxy a small piece of code",
|
||||
"分析结果": "Analysis results",
|
||||
"依赖不足": "Insufficient dependencies",
|
||||
"Markdown翻译": "Markdown translation",
|
||||
"除非您是论文的原作者": "Unless you are the original author of the paper",
|
||||
"test_LangchainKnowledgeBase读取": "test_LangchainKnowledgeBase read",
|
||||
"将多文件tex工程融合为一个巨型tex": "Merge multiple tex projects into one giant tex",
|
||||
"吸收iffalse注释": "Absorb iffalser comments",
|
||||
"您接下来不能再使用其他插件了": "You can no longer use other plugins next",
|
||||
"正在构建知识库": "Building knowledge base",
|
||||
"需Latex": "Requires Latex",
|
||||
"即找不到": "That is not found",
|
||||
"保证括号正确": "Ensure parentheses are correct",
|
||||
"= 2 通过一些Latex模板中常见": "= 2 through some common Latex templates",
|
||||
"请立即终止程序": "Please terminate the program immediately",
|
||||
"解压失败! 需要安装pip install rarfile来解压rar文件": "Decompression failed! Install 'pip install rarfile' to decompress rar files",
|
||||
"请在此处给出自定义翻译命令": "Please provide custom translation command here",
|
||||
"解压失败! 需要安装pip install py7zr来解压7z文件": "Decompression failed! Install 'pip install py7zr' to decompress 7z files",
|
||||
"执行错误": "Execution error",
|
||||
"目前仅支持GPT3.5/GPT4": "Currently only supports GPT3.5/GPT4",
|
||||
"P.S. 顺便把Latex的注释去除": "P.S. Also remove comments from Latex",
|
||||
"写出文件": "Write out the file",
|
||||
"当前报错的latex代码处于第": "The current error in the LaTeX code is on line",
|
||||
"主程序即将开始": "Main program is about to start",
|
||||
"详情信息见requirements.txt": "See details in requirements.txt",
|
||||
"释放线程锁": "Release thread lock",
|
||||
"由于最为关键的转化PDF编译失败": "Due to the critical failure of PDF conversion and compilation",
|
||||
"即将退出": "Exiting soon",
|
||||
"尝试下载": "Attempting to download",
|
||||
"删除整行的空注释": "Remove empty comments from the entire line",
|
||||
"也找不到": "Not found either",
|
||||
"从一批文件": "From a batch of files",
|
||||
"编译结束": "Compilation finished",
|
||||
"调用缓存": "Calling cache",
|
||||
"只有GenerateImage和生成图像相关": "Only GenerateImage and image generation related",
|
||||
"待处理的word文档路径": "Path of the word document to be processed",
|
||||
"是否在提交时自动清空输入框": "Whether to automatically clear the input box upon submission",
|
||||
"检查结果": "Check the result",
|
||||
"生成时间戳": "Generate a timestamp",
|
||||
"编译原始PDF": "Compile the original PDF",
|
||||
"填入ENGINE": "Fill in ENGINE",
|
||||
"填入api版本": "Fill in the API version",
|
||||
"中文Bing版": "Chinese Bing version",
|
||||
"当前支持的格式包括": "Currently supported formats include"
|
||||
}
|
||||
@ -90,11 +90,12 @@
|
||||
|
||||
到现在为止,申请操作就完成了,需要记下来的有下面几个东西:
|
||||
|
||||
● 密钥(1或2都可以)
|
||||
● 密钥(对应AZURE_API_KEY,1或2都可以)
|
||||
|
||||
● 终结点
|
||||
● 终结点 (对应AZURE_ENDPOINT)
|
||||
|
||||
● 部署名(对应AZURE_ENGINE,不是模型名)
|
||||
|
||||
● 部署名(不是模型名)
|
||||
|
||||
# 修改 config.py
|
||||
|
||||
@ -102,50 +103,14 @@
|
||||
AZURE_ENDPOINT = "填入终结点"
|
||||
AZURE_API_KEY = "填入azure openai api的密钥"
|
||||
AZURE_API_VERSION = "2023-05-15" # 默认使用 2023-05-15 版本,无需修改
|
||||
AZURE_ENGINE = "填入部署名"
|
||||
|
||||
```
|
||||
# API的使用
|
||||
|
||||
接下来就是具体怎么使用API了,还是可以参考官方文档:[快速入门 - 开始通过 Azure OpenAI 服务使用 ChatGPT 和 GPT-4 - Azure OpenAI Service | Microsoft Learn](https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/chatgpt-quickstart?pivots=programming-language-python)
|
||||
|
||||
和openai自己的api调用有点类似,都需要安装openai库,不同的是调用方式
|
||||
|
||||
```
|
||||
import openai
|
||||
openai.api_type = "azure" #固定格式,无需修改
|
||||
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT") #这里填入“终结点”
|
||||
openai.api_version = "2023-05-15" #固定格式,无需修改
|
||||
openai.api_key = os.getenv("AZURE_OPENAI_KEY") #这里填入“密钥1”或“密钥2”
|
||||
|
||||
response = openai.ChatCompletion.create(
|
||||
engine="gpt-35-turbo", #这里填入的不是模型名,是部署名
|
||||
messages=[
|
||||
{"role": "system", "content": "You are a helpful assistant."},
|
||||
{"role": "user", "content": "Does Azure OpenAI support customer managed keys?"},
|
||||
{"role": "assistant", "content": "Yes, customer managed keys are supported by Azure OpenAI."},
|
||||
{"role": "user", "content": "Do other Azure Cognitive Services support this too?"}
|
||||
]
|
||||
)
|
||||
|
||||
print(response)
|
||||
print(response['choices'][0]['message']['content'])
|
||||
AZURE_ENGINE = "填入部署名" # 见上图
|
||||
|
||||
```
|
||||
|
||||
需要注意的是:
|
||||
|
||||
1. engine那里填入的是部署名,不是模型名
|
||||
|
||||
2. 通过openai库获得的这个 response 和通过 request 库访问 url 获得的 response 不同,不需要 decode,已经是解析好的 json 了,直接根据键值读取即可。
|
||||
|
||||
更细节的使用方法,详见官方API文档。
|
||||
|
||||
# 关于费用
|
||||
|
||||
Azure OpenAI API 还是需要一些费用的(免费订阅只有1个月有效期),费用如下:
|
||||
|
||||

|
||||
Azure OpenAI API 还是需要一些费用的(免费订阅只有1个月有效期)
|
||||
|
||||
具体可以可以看这个网址 :[Azure OpenAI 服务 - 定价| Microsoft Azure](https://azure.microsoft.com/zh-cn/pricing/details/cognitive-services/openai-service/?cdn=disable)
|
||||
|
||||
|
||||
@ -12,7 +12,7 @@ try {
|
||||
live2d_settings['waifuTipsSize'] = '187x52';
|
||||
live2d_settings['canSwitchModel'] = true;
|
||||
live2d_settings['canSwitchTextures'] = true;
|
||||
live2d_settings['canSwitchHitokoto'] = true;
|
||||
live2d_settings['canSwitchHitokoto'] = false;
|
||||
live2d_settings['canTakeScreenshot'] = false;
|
||||
live2d_settings['canTurnToHomePage'] = false;
|
||||
live2d_settings['canTurnToAboutPage'] = false;
|
||||
|
||||
@ -34,10 +34,10 @@
|
||||
"2": ["来自 Potion Maker 的 Tia 酱 ~"]
|
||||
},
|
||||
"hitokoto_api_message": {
|
||||
"lwl12.com": ["这句一言来自 <span style=\"color:#ff99da;\">『{source}』</span>", ",是 <span style=\"color:#ff99da;\">{creator}</span> 投稿的", "。"],
|
||||
"fghrsh.net": ["这句一言出处是 <span style=\"color:#ff99da;\">『{source}』</span>,是 <span style=\"color:#ff99da;\">FGHRSH</span> 在 {date} 收藏的!"],
|
||||
"jinrishici.com": ["这句诗词出自 <span style=\"color:#ff99da;\">《{title}》</span>,是 {dynasty}诗人 {author} 创作的!"],
|
||||
"hitokoto.cn": ["这句一言来自 <span style=\"color:#ff99da;\">『{source}』</span>,是 <span style=\"color:#ff99da;\">{creator}</span> 在 hitokoto.cn 投稿的。"]
|
||||
"lwl12.com": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>", ",是 <span style=\"color:#0099cc;\">{creator}</span> 投稿的", "。"],
|
||||
"fghrsh.net": ["这句一言出处是 <span style=\"color:#0099cc;\">『{source}』</span>,是 <span style=\"color:#0099cc;\">FGHRSH</span> 在 {date} 收藏的!"],
|
||||
"jinrishici.com": ["这句诗词出自 <span style=\"color:#0099cc;\">《{title}》</span>,是 {dynasty}诗人 {author} 创作的!"],
|
||||
"hitokoto.cn": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>,是 <span style=\"color:#0099cc;\">{creator}</span> 在 hitokoto.cn 投稿的。"]
|
||||
}
|
||||
},
|
||||
"mouseover": [
|
||||
|
||||
778
func_box.py
778
func_box.py
@ -1,778 +0,0 @@
|
||||
#! .\venv\
|
||||
# encoding: utf-8
|
||||
# @Time : 2023/4/18
|
||||
# @Author : Spike
|
||||
# @Descr :
|
||||
import ast
|
||||
import copy
|
||||
import hashlib
|
||||
import io
|
||||
import json
|
||||
import os.path
|
||||
import subprocess
|
||||
import threading
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
import Levenshtein
|
||||
import psutil
|
||||
import re
|
||||
import tempfile
|
||||
import shutil
|
||||
from contextlib import ExitStack
|
||||
import logging
|
||||
import yaml
|
||||
import requests
|
||||
import tiktoken
|
||||
logger = logging
|
||||
from sklearn.feature_extraction.text import CountVectorizer
|
||||
import numpy as np
|
||||
from scipy.linalg import norm
|
||||
import pyperclip
|
||||
import random
|
||||
import gradio as gr
|
||||
import toolbox
|
||||
from prompt_generator import SqliteHandle
|
||||
from bs4 import BeautifulSoup
|
||||
import copy
|
||||
|
||||
"""contextlib 是 Python 标准库中的一个模块,提供了一些工具函数和装饰器,用于支持编写上下文管理器和处理上下文的常见任务,例如资源管理、异常处理等。
|
||||
官网:https://docs.python.org/3/library/contextlib.html"""
|
||||
|
||||
|
||||
class Shell(object):
|
||||
def __init__(self, args, stream=False):
|
||||
self.args = args
|
||||
self.subp = subprocess.Popen(args, shell=True,
|
||||
stdin=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE, encoding='utf-8',
|
||||
errors='ignore', close_fds=True)
|
||||
self.__stream = stream
|
||||
self.__temp = ''
|
||||
|
||||
def read(self):
|
||||
logger.debug(f'The command being executed is: "{self.args}"')
|
||||
if self.__stream:
|
||||
sysout = self.subp.stdout
|
||||
try:
|
||||
with sysout as std:
|
||||
for i in std:
|
||||
logger.info(i.rstrip())
|
||||
self.__temp += i
|
||||
except KeyboardInterrupt as p:
|
||||
return 3, self.__temp + self.subp.stderr.read()
|
||||
finally:
|
||||
return 3, self.__temp + self.subp.stderr.read()
|
||||
else:
|
||||
sysout = self.subp.stdout.read()
|
||||
syserr = self.subp.stderr.read()
|
||||
self.subp.stdin
|
||||
if sysout:
|
||||
logger.debug(f"{self.args} \n{sysout}")
|
||||
return 1, sysout
|
||||
elif syserr:
|
||||
logger.error(f"{self.args} \n{syserr}")
|
||||
return 0, syserr
|
||||
else:
|
||||
logger.debug(f"{self.args} \n{[sysout], [sysout]}")
|
||||
return 2, '\n{}\n{}'.format(sysout, sysout)
|
||||
|
||||
def sync(self):
|
||||
logger.debug('The command being executed is: "{}"'.format(self.args))
|
||||
for i in self.subp.stdout:
|
||||
logger.debug(i.rstrip())
|
||||
self.__temp += i
|
||||
yield self.__temp
|
||||
for i in self.subp.stderr:
|
||||
logger.debug(i.rstrip())
|
||||
self.__temp += i
|
||||
yield self.__temp
|
||||
|
||||
|
||||
def timeStatistics(func):
|
||||
"""
|
||||
统计函数执行时常的装饰器
|
||||
"""
|
||||
|
||||
def statistics(*args, **kwargs):
|
||||
startTiem = time.time()
|
||||
obj = func(*args, **kwargs)
|
||||
endTiem = time.time()
|
||||
ums = startTiem - endTiem
|
||||
print('func:{} > Time-consuming: {}'.format(func, ums))
|
||||
return obj
|
||||
|
||||
return statistics
|
||||
|
||||
|
||||
def copy_temp_file(file):
|
||||
if os.path.exists(file):
|
||||
exdir = tempfile.mkdtemp()
|
||||
temp_ = shutil.copy(file, os.path.join(exdir, os.path.basename(file)))
|
||||
return temp_
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def md5_str(st):
|
||||
# 创建一个 MD5 对象
|
||||
md5 = hashlib.md5()
|
||||
# 更新 MD5 对象的内容
|
||||
md5.update(str(st).encode())
|
||||
# 获取加密后的结果
|
||||
result = md5.hexdigest()
|
||||
return result
|
||||
|
||||
|
||||
def html_tag_color(tag, color=None, font='black'):
|
||||
"""
|
||||
将文本转换为带有高亮提示的html代码
|
||||
"""
|
||||
if not color:
|
||||
rgb = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
|
||||
color = f"rgb{rgb}"
|
||||
tag = f'<span style="background-color: {color}; font-weight: bold; color: {font}"> {tag} </span>'
|
||||
return tag
|
||||
|
||||
def html_a_blank(__href, name=''):
|
||||
if not name:
|
||||
name = __href
|
||||
a = f'<a href="{__href}" target="_blank" class="svelte-xrr240">{name}</a>'
|
||||
return a
|
||||
|
||||
def html_view_blank(__href, file_name=''):
|
||||
if os.path.exists(__href):
|
||||
__href = f'/file={__href}'
|
||||
if not file_name:
|
||||
file_name = __href.split('/')[-1]
|
||||
a = f'<a href="{__href}" target="_blank" class="svelte-xrr240">{file_name}</a>'
|
||||
return a
|
||||
|
||||
def html_iframe_code(html_file):
|
||||
proxy, = toolbox.get_conf('LOCAL_PORT')
|
||||
html_file = f'http://{ipaddr()}:{proxy}/file={html_file}'
|
||||
ifr = f'<iframe width="100%" height="500px" frameborder="0" src="{html_file}"></iframe>'
|
||||
return ifr
|
||||
|
||||
|
||||
def html_download_blank(__href, file_name='temp', dir_name=''):
|
||||
if os.path.exists(__href):
|
||||
__href = f'/file={__href}'
|
||||
if not dir_name:
|
||||
dir_name = file_name
|
||||
a = f'<a href="{__href}" target="_blank" download="{dir_name}" class="svelte-xrr240">{file_name}</a>'
|
||||
return a
|
||||
|
||||
def html_local_img(__file):
|
||||
a = f'<div align="center"><img src="file={__file}"></div>'
|
||||
return a
|
||||
|
||||
def ipaddr():
|
||||
# 获取本地ipx
|
||||
ip = psutil.net_if_addrs()
|
||||
for i in ip:
|
||||
if ip[i][0][3]:
|
||||
return ip[i][0][1]
|
||||
|
||||
|
||||
def encryption_str(txt: str):
|
||||
"""(关键字)(加密间隔)匹配机制(关键字间隔)"""
|
||||
txt = str(txt)
|
||||
pattern = re.compile(rf"(Authorization|WPS-Sid|Cookie)(:|\s+)\s*(\S+)[\s\S]*?(?=\n|$|\s)", re.IGNORECASE)
|
||||
result = pattern.sub(lambda x: x.group(1) + ": XXXXXXXX", txt)
|
||||
return result
|
||||
|
||||
|
||||
def tree_out(dir=os.path.dirname(__file__), line=2, more=''):
|
||||
"""
|
||||
获取本地文件的树形结构转化为Markdown代码文本
|
||||
"""
|
||||
out = Shell(f'tree {dir} -F -I "__*|.*|venv|*.png|*.xlsx" -L {line} {more}').read()[1]
|
||||
localfile = os.path.join(os.path.dirname(__file__), '.tree.md')
|
||||
with open(localfile, 'w') as f:
|
||||
f.write('```\n')
|
||||
ll = out.splitlines()
|
||||
for i in range(len(ll)):
|
||||
if i == 0:
|
||||
f.write(ll[i].split('/')[-2] + '\n')
|
||||
else:
|
||||
f.write(ll[i] + '\n')
|
||||
f.write('```\n')
|
||||
|
||||
|
||||
def chat_history(log: list, split=0):
|
||||
"""
|
||||
auto_gpt 使用的代码,后续会迁移
|
||||
"""
|
||||
if split:
|
||||
log = log[split:]
|
||||
chat = ''
|
||||
history = ''
|
||||
for i in log:
|
||||
chat += f'{i[0]}\n\n'
|
||||
history += f'{i[1]}\n\n'
|
||||
return chat, history
|
||||
|
||||
|
||||
def df_similarity(s1, s2):
|
||||
"""弃用,会警告,这个库不会用"""
|
||||
def add_space(s):
|
||||
return ' '.join(list(s))
|
||||
|
||||
# 将字中间加入空格
|
||||
s1, s2 = add_space(s1), add_space(s2)
|
||||
# 转化为TF矩阵
|
||||
cv = CountVectorizer(tokenizer=lambda s: s.split())
|
||||
corpus = [s1, s2]
|
||||
vectors = cv.fit_transform(corpus).toarray()
|
||||
# 计算TF系数
|
||||
return np.dot(vectors[0], vectors[1]) / (norm(vectors[0]) * norm(vectors[1]))
|
||||
|
||||
|
||||
def check_json_format(file):
|
||||
"""
|
||||
检查上传的Json文件是否符合规范
|
||||
"""
|
||||
new_dict = {}
|
||||
data = JsonHandle(file).load()
|
||||
if type(data) is list and len(data) > 0:
|
||||
if type(data[0]) is dict:
|
||||
for i in data:
|
||||
new_dict.update({i['act']: i['prompt']})
|
||||
return new_dict
|
||||
|
||||
|
||||
def json_convert_dict(file):
|
||||
"""
|
||||
批量将json转换为字典
|
||||
"""
|
||||
new_dict = {}
|
||||
for root, dirs, files in os.walk(file):
|
||||
for f in files:
|
||||
if f.startswith('prompt') and f.endswith('json'):
|
||||
new_dict.update(check_json_format(f))
|
||||
return new_dict
|
||||
|
||||
|
||||
def draw_results(txt, prompt: gr.Dataset, percent, switch, ipaddr: gr.Request):
|
||||
"""
|
||||
绘制搜索结果
|
||||
Args:
|
||||
txt (str): 过滤文本
|
||||
prompt : 原始的dataset对象
|
||||
percent (int): TF系数,用于计算文本相似度
|
||||
switch (list): 过滤个人或所有人的Prompt
|
||||
ipaddr : 请求人信息
|
||||
Returns:
|
||||
注册函数所需的元祖对象
|
||||
"""
|
||||
data = diff_list(txt, percent=percent, switch=switch, hosts=ipaddr.client.host)
|
||||
prompt.samples = data
|
||||
return prompt.update(samples=data, visible=True), prompt
|
||||
|
||||
|
||||
def diff_list(txt='', percent=0.70, switch: list = None, lst: dict = None, sp=15, hosts=''):
    """
    Group stored prompts by textual similarity and build dataset rows.

    Two texts whose Jaro-Winkler similarity is >= *percent* are counted
    together, keeping the longest variant as the representative key.

    Args:
        txt (str): Filter text typed by the user.
        percent (float): Similarity threshold (0-1) for grouping.
        switch (list): Selects personal vs. everyone's prompts.
        lst (dict): Optional pre-fetched {text: value} mapping; when falsy it
            is loaded from the sqlite tables instead.
        sp (int): Length of the text snippet shown in the UI.
        hosts (str): Requesting client's IP (selects the per-user tables).

    Returns:
        list: Rows of [display_html, full_text, stored_value, count].
    """
    count_dict = {}
    # Second configured key marks the "everyone" option — TODO confirm against config.
    is_all = toolbox.get_conf('prompt_list')[0]['key'][1]
    if not lst:
        lst = {}
        tabs = SqliteHandle().get_tables()
        if is_all in switch:
            lst.update(SqliteHandle(f"ai_common_{hosts}").get_prompt_value(txt))
        else:
            for tab in tabs:
                if tab.startswith('ai_common'):
                    lst.update(SqliteHandle(f"{tab}").get_prompt_value(txt))
            lst.update(SqliteHandle(f"ai_private_{hosts}").get_prompt_value(txt))
    # Diff pass: bucket the texts according to the `percent` similarity factor.
    str_ = time.time()

    def tf_factor_calcul(i):
        # NOTE(review): runs concurrently from the pool below while mutating
        # the shared count_dict without a lock — relies on GIL atomicity; verify.
        found = False
        dict_copy = count_dict.copy()
        for key in dict_copy.keys():
            str_tf = Levenshtein.jaro_winkler(i, key)
            if str_tf >= percent:
                if len(i) > len(key):
                    # Longer text wins: carry the old count over, drop the short key.
                    count_dict[i] = count_dict.copy()[key] + 1
                    count_dict.pop(key)
                else:
                    count_dict[key] += 1
                found = True
                break
        if not found: count_dict[i] = 1
    with ThreadPoolExecutor(100) as executor:
        executor.map(tf_factor_calcul, lst)
    print('计算耗时', time.time()-str_)
    sorted_dict = sorted(count_dict.items(), key=lambda x: x[1], reverse=True)
    if switch:
        sorted_dict += prompt_retrieval(is_all=switch, hosts=hosts, search=True)
    dateset_list = []
    for key in sorted_dict:
        # Match the search keyword against both the text and its stored value.
        index = str(key[0]).lower().find(txt.lower())
        index_ = str(key[1]).lower().find(txt.lower())
        if index != -1 or index_ != -1:
            if index == -1: index = index_  # also match on the prompt name/value
            # sp = split point: where the displayed snippet starts and stops.
            if index - sp > 0:
                start = index - sp
            else:
                start = 0
            if len(key[0]) > sp * 2:
                end = key[0][-sp:]
            else:
                end = ''
            # If no filter string was given show a plain snippet, otherwise highlight it.
            if txt == '' and len(key[0]) >= sp:
                show = key[0][0:sp] + " . . . " + end
                show = show.replace('<', '')
            elif txt == '' and len(key[0]) < sp:
                show = key[0][0:sp]
                show = show.replace('<', '')
            else:
                show = str(key[0][start:index + sp]).replace('<', '').replace(txt, html_tag_color(txt))
            show += f" {html_tag_color(' X ' + str(key[1]))}"
            if lst.get(key[0]):
                be_value = lst[key[0]]
            else:
                be_value = None
            value = be_value
            dateset_list.append([show, key[0], value, key[1]])
    return dateset_list
|
||||
|
||||
|
||||
def prompt_upload_refresh(file, prompt, ipaddr: gr.Request):
    """Parse an uploaded prompt file, persist it, and refresh the dataset.

    Args:
        file: The uploaded file object (``.name`` is its path).
        prompt: The original prompt ``gr.Dataset`` component.
        ipaddr: Gradio request object carrying the client host.

    Returns:
        tuple: (dataset update, dataset component, selected scope list).
    """
    hosts = ipaddr.client.host
    path = file.name
    if path.endswith('json'):
        upload_data = check_json_format(path)
    elif path.endswith('yaml'):
        upload_data = YamlHandle(path).load()
    else:
        upload_data = {}
    if upload_data != {}:
        SqliteHandle(f'prompt_{hosts}').inset_prompt(upload_data)
        samples = prompt_retrieval(is_all=['个人'], hosts=hosts)
        return prompt.update(samples=samples, visible=True), prompt, ['个人']
    error_row = [[f'{html_tag_color("数据解析失败,请检查文件是否符合规范", color="red")}', '']]
    prompt.samples = error_row
    return prompt.samples, prompt, []
|
||||
|
||||
|
||||
def prompt_retrieval(is_all, hosts='', search=False):
    """Collect stored prompts for the requested scope.

    Args:
        is_all: Scope selector list; '所有人' pulls every prompt table,
            '个人' pulls only the requesting host's table.
        hosts: Requesting client's IP (used for the personal table name).
        search: When True, rows are [value, key] (search layout) instead
            of [key, value].

    Returns:
        list: Two-element rows; empty list when nothing is stored.
    """
    count_dict = {}
    if '所有人' in is_all:
        for tab in SqliteHandle('ai_common').get_tables():
            if tab.startswith('prompt'):
                data = SqliteHandle(tab).get_prompt_value(None)
                if data:
                    count_dict.update(data)
    elif '个人' in is_all:
        data = SqliteHandle(f'prompt_{hosts}').get_prompt_value(None)
        if data:
            count_dict.update(data)
    if search:
        return [[value, key] for key, value in count_dict.items()]
    return [[key, value] for key, value in count_dict.items()]
|
||||
|
||||
|
||||
def prompt_reduce(is_all, prompt: gr.Dataset, ipaddr: gr.Request):
    """Reload the prompt dataset for the requesting user.

    Args:
        is_all: Scope selector ('所有人' / '个人').
        prompt: The original ``gr.Dataset`` component.
        ipaddr: Gradio request object carrying the client host.

    Returns:
        tuple: (dataset update, dataset component, scope) for the handler.
    """
    samples = prompt_retrieval(is_all=is_all, hosts=ipaddr.client.host)
    prompt.samples = samples
    return prompt.update(samples=samples, visible=True), prompt, is_all
|
||||
|
||||
|
||||
def prompt_save(txt, name, prompt: gr.Dataset, ipaddr: gr.Request):
    """Save (or overwrite) a named prompt for the requesting user.

    Args:
        txt: Prompt body.
        name: Prompt name.
        prompt: The original ``gr.Dataset`` component.
        ipaddr: Gradio request object carrying the client host.

    Returns:
        tuple: Values for the registered handler (cleared inputs on success,
        unchanged inputs plus an error row on failure).
    """
    # Guard clause: both fields are mandatory.
    if not txt or not name:
        result = [[f'{html_tag_color("编辑框 or 名称不能为空!!!!!", color="red")}', '']]
        prompt.samples = result
        return txt, name, [], prompt.update(samples=result, visible=True), prompt, gr.Tabs.update(selected='chatbot')
    SqliteHandle(f'prompt_{ipaddr.client.host}').inset_prompt({name: txt})
    result = prompt_retrieval(is_all=['个人'], hosts=ipaddr.client.host)
    prompt.samples = result
    return "", "", ['个人'], prompt.update(samples=result, visible=True), prompt, gr.Tabs.update(selected='chatbot')
|
||||
|
||||
|
||||
def prompt_input(txt: str, prompt_str, name_str, index, data: gr.Dataset, tabs_index):
    """Apply the clicked dataset prompt to the input or edit area.

    Args:
        txt: Current input-box text.
        prompt_str: Current prompt-editor text.
        name_str: Current prompt-name text.
        index: Clicked row index within the dataset.
        data: The prompt ``gr.Dataset``; rows are [name, template, ...].
        tabs_index: 1 when the prompt-edit tab is active.

    Returns:
        tuple: (input text, editor text, name text) for the handler.
    """
    sample = data.samples[index]
    template, template_name = str(sample[1]), str(sample[0])
    placeholder = '{{{v}}}'

    def merge(user_text):
        # Substitute into the template when it carries a placeholder,
        # append on a new line otherwise; empty input returns the template as-is.
        if not user_text:
            return template
        if placeholder in template:
            return template.replace(placeholder, user_text)
        return template + '\n' + user_text

    if tabs_index == 1:
        return txt, merge(prompt_str), template_name
    return merge(txt), prompt_str, name_str
|
||||
|
||||
|
||||
def copy_result(history):
    """Copy the latest reply in *history* to the system clipboard."""
    if history == []:
        return "无对话记录,复制错误!!"
    pyperclip.copy(history[-1])
    return '已将结果复制到剪切板'
|
||||
|
||||
|
||||
def str_is_list(s):
    """Return True when *s* parses (via ast.literal_eval) as a Python list literal."""
    try:
        return isinstance(ast.literal_eval(s), list)
    except (SyntaxError, ValueError):
        return False
|
||||
|
||||
|
||||
def show_prompt_result(index, data: gr.Dataset, chatbot, pro_edit, pro_name):
    """Show the stored conversation behind a clicked prompt row.

    Args:
        index: Clicked row index within the dataset.
        data: The prompt ``gr.Dataset``; rows are [show, text, value, count].
        chatbot: Chat history list to append the replayed messages to.
        pro_edit: Current prompt-editor content.
        pro_name: Current prompt-name field content.

    Returns:
        tuple: (chatbot, pro_edit, pro_name) for the registered handler.
    """
    click = data.samples[index]
    if str_is_list(click[2]):
        # Security fix: the stored value comes from the database / uploads, so
        # parse it with ast.literal_eval instead of eval. str_is_list already
        # validated it as a plain list literal, so the result is identical.
        list_copy = ast.literal_eval(click[2])
        # Replay messages pairwise as (user, assistant) turns.
        for i in range(0, len(list_copy), 2):
            if i + 1 >= len(list_copy):  # odd tail: last element stands alone
                chatbot.append([list_copy[i]])
            else:
                chatbot.append([list_copy[i], list_copy[i + 1]])
    elif click[2] is None and pro_edit == '':
        # No stored conversation: load the prompt into the editor instead.
        pro_edit = click[1]
        pro_name = click[3]
    else:
        chatbot.append((click[1], click[2]))
    return chatbot, pro_edit, pro_name
|
||||
|
||||
|
||||
|
||||
def pattern_html(html):
    """Extract the plain text of the 'md-message' div from a chatbot HTML fragment.

    Returns '' when the fragment has no such div.
    """
    soup = BeautifulSoup(str(html), 'html.parser')
    node = soup.find('div', {'class': 'md-message'})
    return node.get_text(separator='') if node else ""
|
||||
|
||||
|
||||
def thread_write_chat(chatbot, history):
    """
    Persist the latest chat round to the sqlite store.

    Intended to run on a background thread, hence the shallow copies of the
    mutable arguments taken up front.
    """
    chatbot, history = copy.copy(chatbot), copy.copy(history)
    private_key = toolbox.get_conf('private_key')[0]
    # The first bot message is assumed to carry a session title whose
    # second-to-last word is the client host — TODO confirm against the caller.
    chat_title = chatbot[0][1].split()
    i_say = pattern_html(chatbot[-1][0])
    if history:
        gpt_result = history
    else:  # No explicit history: reconstruct it from the rendered chat HTML.
        gpt_result = [pattern_html(v) for i in chatbot for v in i]
    # Sessions whose title contains the configured private key go to the
    # per-user private table; everything else is shared.
    if private_key in chat_title:
        SqliteHandle(f'ai_private_{chat_title[-2]}').inset_prompt({i_say: gpt_result})
    else:
        SqliteHandle(f'ai_common_{chat_title[-2]}').inset_prompt({i_say: gpt_result})
|
||||
|
||||
|
||||
# Filesystem layout anchored to this module's directory.
base_path = os.path.dirname(__file__)
# Per-user prompt databases and data files.
prompt_path = os.path.join(base_path, 'users_data')
# Destination for user uploads — presumably; confirm against the upload handlers.
users_path = os.path.join(base_path, 'private_upload')
# Conversation and debug logs.
logs_path = os.path.join(base_path, 'gpt_log')
|
||||
|
||||
def reuse_chat(result, chatbot, history, pro_numb, say):
    """Re-inject a stored conversation into the current chat.

    Args:
        result: Stored conversation rows, or None/[] when nothing was picked.
        chatbot: Current chatbot message list (mutated/extended in place).
        history: Current plain-text history (extended in place).
        pro_numb: Truthy to replay the whole conversation, falsy for only
            the last exchange.
        say: Text to restore into the input box.

    Returns:
        tuple: Handler outputs (chatbot, history, input text, tab selection,
        cleared field, column visibility update).
    """
    if result is None or result == []:
        return chatbot, history, gr.update(), gr.update(), '', gr.Column.update()
    if pro_numb:
        # Replay the whole stored conversation.
        chatbot += result
        history += [pattern_html(_) for i in result for _ in i]
    else:
        # Replay only the final exchange.
        chatbot.append(result[-1])
        history += [pattern_html(_) for i in result[-2:] for _ in i]
    # Bug fix: removed a leftover debug print of chatbot[-1][0].
    return chatbot, history, say, gr.Tabs.update(selected='chatbot'), '', gr.Column.update(visible=False)
|
||||
|
||||
|
||||
def num_tokens_from_string(listing: list, encoding_name: str = 'cl100k_base') -> int:
    """Return the total number of tokens across all strings in *listing*.

    Args:
        listing: Strings to tokenize.
        encoding_name: tiktoken encoding to use.

    Returns:
        int: Sum of token counts (0 for an empty list).
    """
    # Hoisted out of the loop: the encoding is loop-invariant and
    # tiktoken.get_encoding is comparatively expensive.
    encoding = tiktoken.get_encoding(encoding_name)
    return sum(len(encoding.encode(text)) for text in listing)
|
||||
|
||||
|
||||
def spinner_chatbot_loading(chatbot):
    """Append an animated 'typing' ellipsis to the last chatbot reply.

    Returns a deep copy so the caller's chatbot state is untouched.
    """
    # Simplified: the original built a one-element list and random.choice'd
    # from it, which is exactly '.' * randint(1, 5).
    dots = '.' * random.randint(1, 5)
    loading_msg = copy.deepcopy(chatbot)
    # Tuples are immutable; rebuild the last (user, reply) pair via a list.
    temp_list = list(loading_msg[-1])
    temp_list[1] = pattern_html(temp_list[1]) + dots
    loading_msg[-1] = tuple(temp_list)
    return loading_msg
|
||||
|
||||
|
||||
def refresh_load_data(chat, history, prompt, crazy_list, request: gr.Request):
    """
    Rebuild per-session UI state on every page load/refresh.

    Args:
        chat: Chatbot component value (passed through unchanged).
        history: Conversation history (passed through unchanged).
        prompt: Prompt ``gr.Dataset`` component to repopulate.
        crazy_list: Pool of plugin/shortcut names to sample suggestions from.
        request: Gradio request object (used for user-agent sniffing).

    Returns:
        list: Outputs in the order expected by the registered event handler.
    """
    # First configured key is the default prompt scope — TODO confirm against config.
    is_all = toolbox.get_conf('prompt_list')[0]['key'][0]
    data = prompt_retrieval(is_all=[is_all])
    prompt.samples = data
    # Show four random suggestions on each refresh.
    selected = random.sample(crazy_list, 4)
    user_agent = request.kwargs['headers']['user-agent'].lower()
    if user_agent.find('android') != -1 or user_agent.find('iphone') != -1:
        # Hide bulky desktop-only elements on mobile browsers.
        hied_elem = gr.update(visible=False)
    else:
        hied_elem = gr.update()
    outputs = [prompt.update(samples=data, visible=True), prompt,
               chat, history, gr.Dataset.update(samples=[[i] for i in selected]), selected,
               hied_elem, hied_elem]
    return outputs
|
||||
|
||||
|
||||
|
||||
def txt_converter_json(input_string):
    """Pretty-print *input_string* as indented JSON when it parses.

    Accepts either a Python dict literal (single quotes) or a JSON document;
    anything unparseable is returned unchanged.
    """
    try:
        looks_like_py_dict = input_string.startswith("{") and input_string.endswith("}")
        if looks_like_py_dict:
            # Handles single-quoted, Python-style dict strings.
            parsed = ast.literal_eval(input_string)
        else:
            parsed = json.loads(input_string)
        return json.dumps(parsed, indent=4, ensure_ascii=False)
    except (ValueError, SyntaxError):
        return input_string
|
||||
|
||||
|
||||
def clean_br_string(s):
    """Replace every HTML <br> variant (<br>, <br/>, <br />, < br>, < br/>) with a newline."""
    # Raw string fixes the invalid escape sequence '\s' (DeprecationWarning,
    # a SyntaxError in future Python versions).
    return re.sub(r'<\s*br\s*/?>', '\n', s)
|
||||
|
||||
|
||||
def update_btn(self,
               value: str = None,
               variant: str = None,
               visible: bool = None,
               interactive: bool = None,
               elem_id: str = None,
               label: str = None
               ):
    """Build a gradio 'update' dict for a Button.

    Every field not explicitly supplied falls back to the component's
    current attribute. Note: falsy explicit values ('' / False) are treated
    as "not supplied", matching the original contract.
    """
    if not variant: variant = self.variant
    if not visible: visible = self.visible
    if not value: value = self.value
    if not interactive: interactive = self.interactive
    if not elem_id: elem_id = self.elem_id
    # Bug fix: this line previously tested `elem_id` (already filled in just
    # above), so `label` stayed None whenever the button had an elem_id.
    if not label: label = self.label
    return {
        "variant": variant,
        "visible": visible,
        "value": value,
        "interactive": interactive,
        'elem_id': elem_id,
        'label': label,
        "__type__": "update",
    }
|
||||
|
||||
def update_txt(self,
               value: str = None,
               lines: int = None,
               max_lines: int = None,
               placeholder: str = None,
               label: str = None,
               show_label: bool = None,
               visible: bool = None,
               interactive: bool = None,
               type: str = None,
               elem_id: str = None
               ):
    """Build a gradio 'update' dict for a Textbox.

    Bug fix: every argument except ``elem_id`` used to be silently ignored —
    the dict was always populated from the component's current attributes.
    Explicit (non-None) arguments now override the current values; omitted
    ones fall back to the component, as in ``update_btn``.
    """
    return {
        "lines": self.lines if lines is None else lines,
        "max_lines": self.max_lines if max_lines is None else max_lines,
        "placeholder": self.placeholder if placeholder is None else placeholder,
        "label": self.label if label is None else label,
        "show_label": self.show_label if show_label is None else show_label,
        "visible": self.visible if visible is None else visible,
        "value": self.value if value is None else value,
        "type": self.type if type is None else type,
        "interactive": self.interactive if interactive is None else interactive,
        "elem_id": self.elem_id if elem_id is None else elem_id,
        "__type__": "update",
    }
|
||||
|
||||
|
||||
def get_html(filename):
    """Read an HTML asset from docs/assets/html; '' when it does not exist."""
    path = os.path.join(base_path, "docs/assets", "html", filename)
    if not os.path.exists(path):
        return ""
    with open(path, encoding="utf8") as file:
        return file.read()
|
||||
|
||||
|
||||
def git_log_list():
    """Return up to five recent non-branch commits as [subject, short-hash] pairs."""
    raw_lines = Shell("git log --pretty=format:'%s | %h' -n 10").read()[1].splitlines()
    entries = [line.split('|') for line in raw_lines if 'branch' not in line]
    return entries[:5]
|
||||
|
||||
import qrcode
|
||||
from PIL import Image, ImageDraw
|
||||
def qr_code_generation(data, icon_path=None, file_name='qc_icon.png'):
    """Generate a QR code for *data* with a centered logo, saved under logs_path.

    Args:
        data: Payload to encode.
        icon_path: Logo image path; defaults to the bundled PLAI logo.
        file_name: Output file name inside ``logs_path``.

    Returns:
        str: Path of the saved PNG.
    """
    qr = qrcode.QRCode(version=2, error_correction=qrcode.constants.ERROR_CORRECT_Q, box_size=10, border=2,)
    qr.add_data(data)
    img = qr.make_image()
    # RGBA so the pasted logo keeps transparency.
    img = img.convert('RGBA')
    img_w, img_h = img.size
    if not icon_path:
        icon_path = os.path.join(base_path, 'docs/assets/PLAI.jpeg')
    logo = Image.open(icon_path)
    # Logo occupies a quarter of the QR code's width (stays scannable at
    # error-correction level Q).
    logo_w = img_w // 4
    logo_h = img_w // 4
    logo = logo.resize((logo_w, logo_h), Image.LANCZOS)  # or Image.Resampling.LANCZOS
    # Center the logo.
    w = (img_w - logo_w) // 2
    h = (img_h - logo_h) // 2
    img.paste(logo, (w, h))
    # Bug fixes: use the file_name variable (was the literal string
    # 'file_name') and actually pass the target path to save() (the original
    # img.save() with no argument raises at runtime).
    qr_path = os.path.join(logs_path, file_name)
    img.save(qr_path)
    return qr_path
|
||||
|
||||
|
||||
class YamlHandle:
    """Thin wrapper around a YAML file: load it, set one key, or merge a dict."""

    def __init__(self, file=os.path.join(prompt_path, 'ai_common.yaml')):
        # Create the file first so load() never fails on a missing path.
        if not os.path.exists(file):
            Shell(f'touch {file}').read()
        self.file = file
        self._load = self.load()

    def load(self) -> dict:
        """Parse and return the YAML document (None for an empty file)."""
        with open(file=self.file, mode='r') as f:
            return yaml.safe_load(f)

    def update(self, key, value):
        """Set a single key and rewrite the file; returns the new mapping."""
        data = self._load or {}
        data[key] = value
        with open(file=self.file, mode='w') as f:
            yaml.dump(data, f, allow_unicode=True)
        return data

    def dump_dict(self, new_dict):
        """Merge *new_dict* into the document and rewrite the file."""
        data = self._load or {}
        data.update(new_dict)
        with open(file=self.file, mode='w') as f:
            yaml.dump(data, f, allow_unicode=True)
        return data
|
||||
|
||||
|
||||
class JsonHandle:
    """Minimal JSON file reader."""

    def __init__(self, file):
        # Path of the JSON document to read.
        self.file = file

    def load(self) -> object:
        """Parse and return the file's JSON content."""
        with open(self.file, 'r') as f:
            return json.load(f)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
pass
|
||||
8
main.py
8
main.py
@ -130,9 +130,9 @@ def main():
|
||||
ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
|
||||
if "底部输入区" in a: ret.update({txt: gr.update(value="")})
|
||||
return ret
|
||||
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, clearBtn, clearBtn2, plugin_advanced_arg] )
|
||||
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] )
|
||||
# 整理反复出现的控件句柄组合
|
||||
input_combo = [cookies, max_length_sl, md_dropdown, txt, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
|
||||
input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
|
||||
output_combo = [cookies, chatbot, history, status]
|
||||
predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
|
||||
# 提交按钮、重置按钮
|
||||
@ -155,7 +155,7 @@ def main():
|
||||
click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
|
||||
cancel_handles.append(click_handle)
|
||||
# 文件上传区,接收文件后与chatbot的互动
|
||||
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt ], [chatbot, txt])
|
||||
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2])
|
||||
# 函数插件-固定按钮区
|
||||
for k in crazy_fns:
|
||||
if not crazy_fns[k].get("AsButton", True): continue
|
||||
@ -174,7 +174,7 @@ def main():
|
||||
dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] )
|
||||
def on_md_dropdown_changed(k):
|
||||
return {chatbot: gr.update(label="当前模型:"+k)}
|
||||
md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot])
|
||||
md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] )
|
||||
# 随变按钮的回调函数注册
|
||||
def route(k, *args, **kwargs):
|
||||
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
|
||||
|
||||
@ -33,7 +33,7 @@ import pickle
|
||||
import time
|
||||
|
||||
CACHE_FOLDER = "gpt_log"
|
||||
blacklist = ['multi-language', 'gpt_log', '.git', 'private_upload', 'multi_language.py']
|
||||
blacklist = ['multi-language', 'gpt_log', '.git', 'private_upload', 'multi_language.py', 'build', '.github', '.vscode', '__pycache__', 'venv']
|
||||
|
||||
# LANG = "TraditionalChinese"
|
||||
# TransPrompt = f"Replace each json value `#` with translated results in Traditional Chinese, e.g., \"原始文本\":\"翻譯後文字\". Keep Json format. Do not answer #."
|
||||
@ -301,6 +301,7 @@ def step_1_core_key_translate():
|
||||
elif isinstance(node, ast.ImportFrom):
|
||||
for n in node.names:
|
||||
if contains_chinese(n.name): syntax.append(n.name)
|
||||
# if node.module is None: print(node.module)
|
||||
for k in node.module.split('.'):
|
||||
if contains_chinese(k): syntax.append(k)
|
||||
return syntax
|
||||
@ -310,6 +311,7 @@ def step_1_core_key_translate():
|
||||
for root, dirs, files in os.walk(directory_path):
|
||||
if any([b in root for b in blacklist]):
|
||||
continue
|
||||
print(files)
|
||||
for file in files:
|
||||
if file.endswith('.py'):
|
||||
file_path = os.path.join(root, file)
|
||||
@ -505,6 +507,6 @@ def step_2_core_key_translate():
|
||||
with open(file_path_new, 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
os.remove(file_path)
|
||||
|
||||
step_1_core_key_translate()
|
||||
step_2_core_key_translate()
|
||||
print('Finished, checkout generated results at ./multi-language/')
|
||||
@ -1,102 +0,0 @@
|
||||
#! .\venv\
|
||||
# encoding: utf-8
|
||||
# @Time : 2023/4/19
|
||||
# @Author : Spike
|
||||
# @Descr :
|
||||
import os.path
|
||||
import sqlite3
|
||||
import threading
|
||||
import functools
|
||||
import func_box
|
||||
# 连接到数据库
|
||||
base_path = os.path.dirname(__file__)
|
||||
prompt_path = os.path.join(base_path, 'users_data')
|
||||
|
||||
|
||||
def connect_db_close(cls_method):
    """Decorator: open the handle's DB connection around *cls_method*, then close it.

    The wrapped method's return value is passed through unchanged.
    """
    @functools.wraps(cls_method)
    def wrapper(cls=None, *args, **kwargs):
        cls._connect_db()
        outcome = cls_method(cls, *args, **kwargs)
        cls._close_db()
        return outcome
    return wrapper
|
||||
|
||||
|
||||
class SqliteHandle:
    """sqlite3-backed storage of prompt/result pairs, one table per scope."""

    def __init__(self, table='ai_common', database='ai_prompt.db'):
        self.__database = database
        self.__connect = sqlite3.connect(os.path.join(prompt_path, self.__database))
        self.__cursor = self.__connect.cursor()
        self.__table = table
        if self.__table not in self.get_tables():
            self.create_tab()

    def new_connect_db(self):
        """Open a fresh connection (sqlite3 connections are per-thread)."""
        self.__connect = sqlite3.connect(os.path.join(prompt_path, self.__database))
        self.__cursor = self.__connect.cursor()

    def new_close_db(self):
        """Close the cursor and the connection."""
        self.__cursor.close()
        self.__connect.close()

    def create_tab(self):
        """Create this handle's table with (prompt UNIQUE, result) columns."""
        # Table names cannot be bound as parameters; they come from code, not users.
        self.__cursor.execute(f"CREATE TABLE `{self.__table}` ('prompt' TEXT UNIQUE, 'result' TEXT)")

    def get_tables(self):
        """Return the names of all tables in the database."""
        result = self.__cursor.execute("SELECT name FROM sqlite_master WHERE type = 'table';")
        return [row[0] for row in result]

    def get_prompt_value(self, find=None):
        """Return {prompt: result}; filter prompts containing *find* when given."""
        temp_all = {}
        if find:
            # Security fix: *find* is user search text — bind it as a parameter
            # instead of interpolating into the SQL (was injectable).
            result = self.__cursor.execute(
                f"SELECT prompt, result FROM `{self.__table}` WHERE prompt LIKE ?",
                (f'%{find}%',)).fetchall()
        else:
            result = self.__cursor.execute(f"SELECT prompt, result FROM `{self.__table}`").fetchall()
        for row in result:
            temp_all[row[0]] = row[1]
        return temp_all

    def inset_prompt(self, prompt: dict):
        """Insert-or-replace every {prompt: result} pair and commit."""
        for key in prompt:
            self.__cursor.execute(f"REPLACE INTO `{self.__table}` (prompt, result) VALUES (?, ?);", (str(key), str(prompt[key])))
        self.__connect.commit()

    def delete_prompt(self, name):
        """Delete rows whose prompt matches the LIKE pattern *name*."""
        # Security fix: parameterized (was an injectable f-string).
        self.__cursor.execute(f"DELETE from `{self.__table}` where prompt LIKE ?", (name,))
        self.__connect.commit()

    def delete_tabls(self, tab):
        """Drop table *tab* entirely."""
        self.__cursor.execute(f"DROP TABLE `{tab}`;")
        self.__connect.commit()

    def find_prompt_result(self, name):
        """Return the stored result for *name*, falling back to the localhost table.

        Raises IndexError when neither table has a match (original behavior).
        """
        # Security fix: parameterized LIKE (was an injectable f-string).
        query = self.__cursor.execute(
            f"SELECT result FROM `{self.__table}` WHERE prompt LIKE ?", (name,)).fetchall()
        if query == []:
            query = self.__cursor.execute(
                "SELECT result FROM `prompt_127.0.0.1` WHERE prompt LIKE ?", (name,)).fetchall()
        return query[0][0]
|
||||
|
||||
def cp_db_data(incloud_tab='prompt'):
    """Copy every table whose name starts with *incloud_tab* from the backup DB into the main DB."""
    backup = sqlite_handle(database='ai_prompt_cp.db')
    for tab_name in backup.get_tables():
        if str(tab_name).startswith(incloud_tab):
            old_data = sqlite_handle(table=tab_name, database='ai_prompt_cp.db').get_prompt_value()
            sqlite_handle(table=tab_name).inset_prompt(old_data)
|
||||
|
||||
def inset_127_prompt():
    """Seed the localhost prompt table from the bundled PlexPt prompt JSON."""
    sql_handle = sqlite_handle(table='prompt_127.0.0.1')
    prompt_json = os.path.join(prompt_path, 'prompts-PlexPt.json')
    for entry in func_box.JsonHandle(prompt_json).load():
        sql_handle.inset_prompt(prompt={entry['act']: entry['prompt']})
|
||||
|
||||
sqlite_handle = SqliteHandle
|
||||
if __name__ == '__main__':
|
||||
cp_db_data()
|
||||
@ -13,11 +13,8 @@ from functools import lru_cache
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from toolbox import get_conf, trimmed_format_exc
|
||||
|
||||
from request_llm.bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
|
||||
from request_llm.bridge_chatgpt import predict as chatgpt_ui
|
||||
|
||||
from .bridge_azure_test import predict_no_ui_long_connection as azure_noui
|
||||
from .bridge_azure_test import predict as azure_ui
|
||||
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
|
||||
from .bridge_chatgpt import predict as chatgpt_ui
|
||||
|
||||
from .bridge_azure_test import predict_no_ui_long_connection as azure_noui
|
||||
from .bridge_azure_test import predict as azure_ui
|
||||
@ -25,9 +22,6 @@ from .bridge_azure_test import predict as azure_ui
|
||||
from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
|
||||
from .bridge_chatglm import predict as chatglm_ui
|
||||
|
||||
from .bridge_newbing import predict_no_ui_long_connection as newbing_noui
|
||||
from .bridge_newbing import predict as newbing_ui
|
||||
|
||||
# from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
|
||||
# from .bridge_tgui import predict as tgui_ui
|
||||
|
||||
@ -54,11 +48,10 @@ class LazyloadTiktoken(object):
|
||||
return encoder.decode(*args, **kwargs)
|
||||
|
||||
# Endpoint 重定向
|
||||
API_URL_REDIRECT, PROXY_API_URL = get_conf("API_URL_REDIRECT", 'PROXY_API_URL')
|
||||
API_URL_REDIRECT, = get_conf("API_URL_REDIRECT")
|
||||
openai_endpoint = "https://api.openai.com/v1/chat/completions"
|
||||
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
|
||||
newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
|
||||
proxy_endpoint = PROXY_API_URL
|
||||
# 兼容旧版的配置
|
||||
try:
|
||||
API_URL, = get_conf("API_URL")
|
||||
@ -73,7 +66,6 @@ if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_e
|
||||
if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]
|
||||
|
||||
|
||||
|
||||
# 获取tokenizer
|
||||
tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
|
||||
tokenizer_gpt4 = LazyloadTiktoken("gpt-4")
|
||||
@ -127,18 +119,9 @@ model_info = {
|
||||
"tokenizer": tokenizer_gpt4,
|
||||
"token_cnt": get_token_num_gpt4,
|
||||
},
|
||||
# azure openai
|
||||
"azure-gpt35":{
|
||||
"fn_with_ui": azure_ui,
|
||||
"fn_without_ui": azure_noui,
|
||||
"endpoint": get_conf("AZURE_ENDPOINT"),
|
||||
"max_token": 4096,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
},
|
||||
|
||||
# azure openai
|
||||
"azure-gpt35":{
|
||||
"azure-gpt-3.5":{
|
||||
"fn_with_ui": azure_ui,
|
||||
"fn_without_ui": azure_noui,
|
||||
"endpoint": get_conf("AZURE_ENDPOINT"),
|
||||
@ -161,9 +144,9 @@ model_info = {
|
||||
"fn_with_ui": chatgpt_ui,
|
||||
"fn_without_ui": chatgpt_noui,
|
||||
"endpoint": api2d_endpoint,
|
||||
"max_token": 4096,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
"max_token": 8192,
|
||||
"tokenizer": tokenizer_gpt4,
|
||||
"token_cnt": get_token_num_gpt4,
|
||||
},
|
||||
|
||||
# 将 chatglm 直接对齐到 chatglm2
|
||||
@ -183,16 +166,6 @@ model_info = {
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
},
|
||||
|
||||
# newbing
|
||||
"newbing": {
|
||||
"fn_with_ui": newbing_ui,
|
||||
"fn_without_ui": newbing_noui,
|
||||
"endpoint": newbing_endpoint,
|
||||
"max_token": 4096,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
},
|
||||
|
||||
}
|
||||
|
||||
@ -281,6 +254,23 @@ if "newbing-free" in AVAIL_LLM_MODELS:
|
||||
})
|
||||
except:
|
||||
print(trimmed_format_exc())
|
||||
if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free
|
||||
try:
|
||||
from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
|
||||
from .bridge_newbingfree import predict as newbingfree_ui
|
||||
# claude
|
||||
model_info.update({
|
||||
"newbing": {
|
||||
"fn_with_ui": newbingfree_ui,
|
||||
"fn_without_ui": newbingfree_noui,
|
||||
"endpoint": newbing_endpoint,
|
||||
"max_token": 4096,
|
||||
"tokenizer": tokenizer_gpt35,
|
||||
"token_cnt": get_token_num_gpt35,
|
||||
}
|
||||
})
|
||||
except:
|
||||
print(trimmed_format_exc())
|
||||
|
||||
def LLM_CATCH_EXCEPTION(f):
|
||||
"""
|
||||
|
||||
@ -14,7 +14,8 @@ import traceback
|
||||
import importlib
|
||||
import openai
|
||||
import time
|
||||
|
||||
import requests
|
||||
import json
|
||||
|
||||
# 读取config.py文件中关于AZURE OPENAI API的信息
|
||||
from toolbox import get_conf, update_ui, clip_history, trimmed_format_exc
|
||||
@ -43,7 +44,6 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
||||
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
|
||||
additional_fn代表点击的哪个按钮,按钮见functional.py
|
||||
"""
|
||||
print(llm_kwargs["llm_model"])
|
||||
|
||||
if additional_fn is not None:
|
||||
import core_functional
|
||||
@ -56,7 +56,6 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
||||
logging.info(f'[raw_input] {raw_input}')
|
||||
chatbot.append((inputs, ""))
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
||||
|
||||
|
||||
payload = generate_azure_payload(inputs, llm_kwargs, history, system_prompt, stream)
|
||||
|
||||
@ -64,20 +63,22 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
||||
|
||||
retry = 0
|
||||
while True:
|
||||
try:
|
||||
|
||||
try:
|
||||
openai.api_type = "azure"
|
||||
openai.api_version = AZURE_API_VERSION
|
||||
openai.api_base = AZURE_ENDPOINT
|
||||
openai.api_key = AZURE_API_KEY
|
||||
response = openai.ChatCompletion.create(timeout=TIMEOUT_SECONDS, **payload);break
|
||||
|
||||
except openai.error.AuthenticationError:
|
||||
tb_str = '```\n' + trimmed_format_exc() + '```'
|
||||
chatbot[-1] = [chatbot[-1][0], tb_str]
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="openai返回错误") # 刷新界面
|
||||
return
|
||||
except:
|
||||
retry += 1
|
||||
chatbot[-1] = ((chatbot[-1][0], "获取response失败,重试中。。。"))
|
||||
retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
|
||||
traceback.print_exc()
|
||||
if retry > MAX_RETRY: raise TimeoutError
|
||||
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
|
||||
|
||||
gpt_replying_buffer = ""
|
||||
is_head_of_the_stream = True
|
||||
@ -141,20 +142,17 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
||||
payload = generate_azure_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
|
||||
retry = 0
|
||||
while True:
|
||||
|
||||
try:
|
||||
openai.api_type = "azure"
|
||||
openai.api_version = AZURE_API_VERSION
|
||||
openai.api_base = AZURE_ENDPOINT
|
||||
openai.api_key = AZURE_API_KEY
|
||||
response = openai.ChatCompletion.create(timeout=TIMEOUT_SECONDS, **payload);break
|
||||
|
||||
except:
|
||||
except:
|
||||
retry += 1
|
||||
traceback.print_exc()
|
||||
if retry > MAX_RETRY: raise TimeoutError
|
||||
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
|
||||
|
||||
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
|
||||
|
||||
stream_response = response
|
||||
result = ''
|
||||
@ -164,19 +162,14 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
||||
break
|
||||
except:
|
||||
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
|
||||
|
||||
if len(chunk)==0: continue
|
||||
if not chunk.startswith('data:'):
|
||||
error_msg = get_full_error(chunk, stream_response)
|
||||
if "reduce the length" in error_msg:
|
||||
raise ConnectionAbortedError("AZURE OPENAI API拒绝了请求:" + error_msg)
|
||||
else:
|
||||
raise RuntimeError("AZURE OPENAI API拒绝了请求:" + error_msg)
|
||||
if ('data: [DONE]' in chunk): break
|
||||
|
||||
delta = chunk["delta"]
|
||||
if len(delta) == 0: break
|
||||
if "role" in delta: continue
|
||||
|
||||
json_data = json.loads(str(chunk))['choices'][0]
|
||||
delta = json_data["delta"]
|
||||
if len(delta) == 0:
|
||||
break
|
||||
if "role" in delta:
|
||||
continue
|
||||
if "content" in delta:
|
||||
result += delta["content"]
|
||||
if not console_slience: print(delta["content"], end='')
|
||||
@ -184,11 +177,14 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
||||
# 观测窗,把已经获取的数据显示出去
|
||||
if len(observe_window) >= 1: observe_window[0] += delta["content"]
|
||||
# 看门狗,如果超过期限没有喂狗,则终止
|
||||
if len(observe_window) >= 2:
|
||||
if len(observe_window) >= 2000:
|
||||
if (time.time()-observe_window[1]) > watch_dog_patience:
|
||||
raise RuntimeError("用户取消了程序。")
|
||||
else: raise RuntimeError("意外Json结构:"+delta)
|
||||
if chunk['finish_reason'] == 'length':
|
||||
else:
|
||||
raise RuntimeError("意外Json结构:"+delta)
|
||||
if json_data['finish_reason'] == 'content_filter':
|
||||
raise RuntimeError("由于提问含不合规内容被Azure过滤。")
|
||||
if json_data['finish_reason'] == 'length':
|
||||
raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
|
||||
return result
|
||||
|
||||
|
||||
@ -12,20 +12,18 @@
|
||||
"""
|
||||
|
||||
import json
|
||||
import random
|
||||
import time
|
||||
import gradio as gr
|
||||
import logging
|
||||
import traceback
|
||||
import requests
|
||||
import importlib
|
||||
import func_box
|
||||
|
||||
# config_private.py放自己的秘密如API和代理网址
|
||||
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
|
||||
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc
|
||||
proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
|
||||
get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY')
|
||||
proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY, API_ORG = \
|
||||
get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG')
|
||||
|
||||
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
|
||||
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
|
||||
@ -62,7 +60,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
||||
while True:
|
||||
try:
|
||||
# make a POST request to the API endpoint, stream=False
|
||||
from request_llm.bridge_all import model_info
|
||||
from .bridge_all import model_info
|
||||
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
|
||||
response = requests.post(endpoint, headers=headers, proxies=proxies,
|
||||
json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
|
||||
@ -108,7 +106,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
||||
return result
|
||||
|
||||
|
||||
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
|
||||
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
|
||||
"""
|
||||
发送至chatGPT,流式获取输出。
|
||||
用于基础的对话功能。
|
||||
@ -136,22 +134,24 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
||||
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
|
||||
|
||||
raw_input = inputs
|
||||
logging.info(f'[raw_input]_{llm_kwargs["ipaddr"]} {raw_input}')
|
||||
logging.info(f'[raw_input] {raw_input}')
|
||||
chatbot.append((inputs, ""))
|
||||
loading_msg = func_box.spinner_chatbot_loading(chatbot)
|
||||
yield from update_ui(chatbot=loading_msg, history=history, msg="等待响应") # 刷新界面
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
||||
|
||||
try:
|
||||
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
|
||||
except RuntimeError as e:
|
||||
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
|
||||
return
|
||||
|
||||
history.append(inputs); history.append("")
|
||||
|
||||
retry = 0
|
||||
while True:
|
||||
try:
|
||||
# make a POST request to the API endpoint, stream=True
|
||||
from request_llm.bridge_all import model_info
|
||||
from .bridge_all import model_info
|
||||
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
|
||||
response = requests.post(endpoint, headers=headers, proxies=proxies,
|
||||
json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
|
||||
@ -163,6 +163,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
||||
if retry > MAX_RETRY: raise TimeoutError
|
||||
|
||||
gpt_replying_buffer = ""
|
||||
|
||||
is_head_of_the_stream = True
|
||||
if stream:
|
||||
stream_response = response.iter_lines()
|
||||
@ -180,44 +181,47 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
||||
if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()):
|
||||
# 数据流的第一帧不携带content
|
||||
is_head_of_the_stream = False; continue
|
||||
|
||||
|
||||
if chunk:
|
||||
try:
|
||||
chunk_decoded = chunk.decode()
|
||||
# 前者API2D的
|
||||
if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
|
||||
# 判定为数据流的结束,gpt_replying_buffer也写完了
|
||||
logging.info(f'[response]_{llm_kwargs["ipaddr"]} {gpt_replying_buffer}')
|
||||
logging.info(f'[response] {gpt_replying_buffer}')
|
||||
break
|
||||
# 处理数据流的主体
|
||||
chunkjson = json.loads(chunk_decoded[6:])
|
||||
|
||||
status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
|
||||
# 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出
|
||||
gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"]
|
||||
history[-1] = gpt_replying_buffer
|
||||
chatbot[-1] = (history[-2], history[-1])
|
||||
count_time = round(time.time() - llm_kwargs['start_time'], 3)
|
||||
status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}\t" \
|
||||
f"本次对话耗时: {func_box.html_tag_color(tag=f'{count_time}s')}"
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
|
||||
chunk = get_full_error(chunk, stream_response)
|
||||
chunk_decoded = chunk.decode()
|
||||
error_msg = chunk_decoded
|
||||
openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup'
|
||||
if "reduce the length" in error_msg:
|
||||
if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出
|
||||
history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
|
||||
history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
|
||||
max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一
|
||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
|
||||
# history = [] # 清除历史
|
||||
elif "does not exist" in error_msg:
|
||||
chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
|
||||
elif "Incorrect API key" in error_msg:
|
||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务.")
|
||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website)
|
||||
elif "exceeded your current quota" in error_msg:
|
||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务.")
|
||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website)
|
||||
elif "account is not active" in error_msg:
|
||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website)
|
||||
elif "associated with a deactivated account" in error_msg:
|
||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website)
|
||||
elif "bad forward key" in error_msg:
|
||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
|
||||
elif "Not enough point" in error_msg:
|
||||
@ -228,9 +232,6 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
||||
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
|
||||
return
|
||||
count_tokens = func_box.num_tokens_from_string(listing=history)
|
||||
status_text += f'\t 本次对话使用tokens: {func_box.html_tag_color(count_tokens)}'
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
|
||||
|
||||
def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
|
||||
"""
|
||||
@ -238,18 +239,14 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
|
||||
"""
|
||||
if not is_any_api_key(llm_kwargs['api_key']):
|
||||
raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")
|
||||
if llm_kwargs['llm_model'].startswith('proxy-'):
|
||||
api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"api-key": f"{api_key}"
|
||||
}
|
||||
else:
|
||||
api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": f"Bearer {api_key}"
|
||||
}
|
||||
|
||||
api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
|
||||
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": f"Bearer {api_key}"
|
||||
}
|
||||
if API_ORG.startswith('org-'): headers.update({"OpenAI-Organization": API_ORG})
|
||||
|
||||
conversation_cnt = len(history) // 2
|
||||
|
||||
@ -286,20 +283,9 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
|
||||
"frequency_penalty": 0,
|
||||
}
|
||||
try:
|
||||
print("\033[1;35m", f"{llm_kwargs['llm_model']}_{llm_kwargs['ipaddr']} :", "\033[0m", f"{conversation_cnt} : {inputs[:100]} ..........")
|
||||
print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
|
||||
except:
|
||||
print('输入中可能存在乱码。')
|
||||
return headers, payload
|
||||
return headers,payload
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
llm_kwargs = {
|
||||
'api_key': 'sk-',
|
||||
'llm_model': 'gpt-3.5-turbo',
|
||||
'top_p': 1,
|
||||
'max_length': 512,
|
||||
'temperature': 1,
|
||||
# 'ipaddr': ipaddr.client.host
|
||||
}
|
||||
chat = []
|
||||
predict('你好', llm_kwargs=llm_kwargs, chatbot=chat, plugin_kwargs={})
|
||||
print(chat)
|
||||
@ -1,254 +0,0 @@
|
||||
"""
|
||||
========================================================================
|
||||
第一部分:来自EdgeGPT.py
|
||||
https://github.com/acheong08/EdgeGPT
|
||||
========================================================================
|
||||
"""
|
||||
from .edge_gpt import NewbingChatbot
|
||||
load_message = "等待NewBing响应。"
|
||||
|
||||
"""
|
||||
========================================================================
|
||||
第二部分:子进程Worker(调用主体)
|
||||
========================================================================
|
||||
"""
|
||||
import time
|
||||
import json
|
||||
import re
|
||||
import logging
|
||||
import asyncio
|
||||
import importlib
|
||||
import threading
|
||||
from toolbox import update_ui, get_conf, trimmed_format_exc
|
||||
from multiprocessing import Process, Pipe
|
||||
|
||||
def preprocess_newbing_out(s):
|
||||
pattern = r'\^(\d+)\^' # 匹配^数字^
|
||||
sub = lambda m: '('+m.group(1)+')' # 将匹配到的数字作为替换值
|
||||
result = re.sub(pattern, sub, s) # 替换操作
|
||||
if '[1]' in result:
|
||||
result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
|
||||
return result
|
||||
|
||||
def preprocess_newbing_out_simple(result):
|
||||
if '[1]' in result:
|
||||
result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
|
||||
return result
|
||||
|
||||
class NewBingHandle(Process):
|
||||
def __init__(self):
|
||||
super().__init__(daemon=True)
|
||||
self.parent, self.child = Pipe()
|
||||
self.newbing_model = None
|
||||
self.info = ""
|
||||
self.success = True
|
||||
self.local_history = []
|
||||
self.check_dependency()
|
||||
self.start()
|
||||
self.threadLock = threading.Lock()
|
||||
|
||||
def check_dependency(self):
|
||||
try:
|
||||
self.success = False
|
||||
import certifi, httpx, rich
|
||||
self.info = "依赖检测通过,等待NewBing响应。注意目前不能多人同时调用NewBing接口(有线程锁),否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时,会自动使用已配置的代理。"
|
||||
self.success = True
|
||||
except:
|
||||
self.info = "缺少的依赖,如果要使用Newbing,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_newbing.txt`安装Newbing的依赖。"
|
||||
self.success = False
|
||||
|
||||
def ready(self):
|
||||
return self.newbing_model is not None
|
||||
|
||||
async def async_run(self):
|
||||
# 读取配置
|
||||
NEWBING_STYLE, = get_conf('NEWBING_STYLE')
|
||||
from request_llm.bridge_all import model_info
|
||||
endpoint = model_info['newbing']['endpoint']
|
||||
while True:
|
||||
# 等待
|
||||
kwargs = self.child.recv()
|
||||
question=kwargs['query']
|
||||
history=kwargs['history']
|
||||
system_prompt=kwargs['system_prompt']
|
||||
|
||||
# 是否重置
|
||||
if len(self.local_history) > 0 and len(history)==0:
|
||||
await self.newbing_model.reset()
|
||||
self.local_history = []
|
||||
|
||||
# 开始问问题
|
||||
prompt = ""
|
||||
if system_prompt not in self.local_history:
|
||||
self.local_history.append(system_prompt)
|
||||
prompt += system_prompt + '\n'
|
||||
|
||||
# 追加历史
|
||||
for ab in history:
|
||||
a, b = ab
|
||||
if a not in self.local_history:
|
||||
self.local_history.append(a)
|
||||
prompt += a + '\n'
|
||||
# if b not in self.local_history:
|
||||
# self.local_history.append(b)
|
||||
# prompt += b + '\n'
|
||||
|
||||
# 问题
|
||||
prompt += question
|
||||
self.local_history.append(question)
|
||||
print('question:', prompt)
|
||||
# 提交
|
||||
async for final, response in self.newbing_model.ask_stream(
|
||||
prompt=question,
|
||||
conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
|
||||
wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
|
||||
):
|
||||
if not final:
|
||||
print(response)
|
||||
self.child.send(str(response))
|
||||
else:
|
||||
print('-------- receive final ---------')
|
||||
self.child.send('[Finish]')
|
||||
# self.local_history.append(response)
|
||||
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
这个函数运行在子进程
|
||||
"""
|
||||
# 第一次运行,加载参数
|
||||
self.success = False
|
||||
self.local_history = []
|
||||
if (self.newbing_model is None) or (not self.success):
|
||||
# 代理设置
|
||||
proxies, = get_conf('proxies')
|
||||
if proxies is None:
|
||||
self.proxies_https = None
|
||||
else:
|
||||
self.proxies_https = proxies['https']
|
||||
# cookie
|
||||
NEWBING_COOKIES, = get_conf('NEWBING_COOKIES')
|
||||
try:
|
||||
cookies = json.loads(NEWBING_COOKIES)
|
||||
except:
|
||||
self.success = False
|
||||
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
|
||||
self.child.send(f'[Local Message] 不能加载Newbing组件。NEWBING_COOKIES未填写或有格式错误。')
|
||||
self.child.send('[Fail]')
|
||||
self.child.send('[Finish]')
|
||||
raise RuntimeError(f"不能加载Newbing组件。NEWBING_COOKIES未填写或有格式错误。")
|
||||
|
||||
try:
|
||||
self.newbing_model = NewbingChatbot(proxy=self.proxies_https, cookies=cookies)
|
||||
except:
|
||||
self.success = False
|
||||
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
|
||||
self.child.send(f'[Local Message] 不能加载Newbing组件。{tb_str}')
|
||||
self.child.send('[Fail]')
|
||||
self.child.send('[Finish]')
|
||||
raise RuntimeError(f"不能加载Newbing组件。")
|
||||
|
||||
self.success = True
|
||||
try:
|
||||
# 进入任务等待状态
|
||||
asyncio.run(self.async_run())
|
||||
except Exception:
|
||||
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
|
||||
self.child.send(f'[Local Message] Newbing失败 {tb_str}.')
|
||||
self.child.send('[Fail]')
|
||||
self.child.send('[Finish]')
|
||||
|
||||
def stream_chat(self, **kwargs):
|
||||
"""
|
||||
这个函数运行在主进程
|
||||
"""
|
||||
self.threadLock.acquire()
|
||||
self.parent.send(kwargs) # 发送请求到子进程
|
||||
while True:
|
||||
res = self.parent.recv() # 等待newbing回复的片段
|
||||
if res == '[Finish]':
|
||||
break # 结束
|
||||
elif res == '[Fail]':
|
||||
self.success = False
|
||||
break
|
||||
else:
|
||||
yield res # newbing回复的片段
|
||||
self.threadLock.release()
|
||||
|
||||
|
||||
"""
|
||||
========================================================================
|
||||
第三部分:主进程统一调用函数接口
|
||||
========================================================================
|
||||
"""
|
||||
global newbing_handle
|
||||
newbing_handle = None
|
||||
|
||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
|
||||
"""
|
||||
多线程方法
|
||||
函数的说明请见 request_llm/bridge_all.py
|
||||
"""
|
||||
global newbing_handle
|
||||
if (newbing_handle is None) or (not newbing_handle.success):
|
||||
newbing_handle = NewBingHandle()
|
||||
observe_window[0] = load_message + "\n\n" + newbing_handle.info
|
||||
if not newbing_handle.success:
|
||||
error = newbing_handle.info
|
||||
newbing_handle = None
|
||||
raise RuntimeError(error)
|
||||
|
||||
# 没有 sys_prompt 接口,因此把prompt加入 history
|
||||
history_feedin = []
|
||||
for i in range(len(history)//2):
|
||||
history_feedin.append([history[2*i], history[2*i+1]] )
|
||||
|
||||
watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
|
||||
response = ""
|
||||
observe_window[0] = "[Local Message]: 等待NewBing响应中 ..."
|
||||
for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
|
||||
observe_window[0] = preprocess_newbing_out_simple(response)
|
||||
if len(observe_window) >= 2:
|
||||
if (time.time()-observe_window[1]) > watch_dog_patience:
|
||||
raise RuntimeError("程序终止。")
|
||||
return preprocess_newbing_out_simple(response)
|
||||
|
||||
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
|
||||
"""
|
||||
单线程方法
|
||||
函数的说明请见 request_llm/bridge_all.py
|
||||
"""
|
||||
chatbot.append((inputs, "[Local Message]: 等待NewBing响应中 ..."))
|
||||
|
||||
global newbing_handle
|
||||
if (newbing_handle is None) or (not newbing_handle.success):
|
||||
newbing_handle = NewBingHandle()
|
||||
chatbot[-1] = (inputs, load_message + "\n\n" + newbing_handle.info)
|
||||
yield from update_ui(chatbot=chatbot, history=[])
|
||||
if not newbing_handle.success:
|
||||
newbing_handle = None
|
||||
return
|
||||
|
||||
if additional_fn is not None:
|
||||
import core_functional
|
||||
importlib.reload(core_functional) # 热更新prompt
|
||||
core_functional = core_functional.get_core_functions()
|
||||
if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
|
||||
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
|
||||
|
||||
history_feedin = []
|
||||
for i in range(len(history)//2):
|
||||
history_feedin.append([history[2*i], history[2*i+1]] )
|
||||
|
||||
chatbot[-1] = (inputs, "[Local Message]: 等待NewBing响应中 ...")
|
||||
response = "[Local Message]: 等待NewBing响应中 ..."
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
|
||||
for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
|
||||
chatbot[-1] = (inputs, preprocess_newbing_out(response))
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
|
||||
if response == "[Local Message]: 等待NewBing响应中 ...": response = "[Local Message]: NewBing响应异常,请刷新界面重试 ..."
|
||||
history.extend([inputs, response])
|
||||
logging.info(f'[raw_input] {inputs}')
|
||||
logging.info(f'[response] {response}')
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
|
||||
|
||||
@ -89,9 +89,6 @@ class NewBingHandle(Process):
|
||||
if a not in self.local_history:
|
||||
self.local_history.append(a)
|
||||
prompt += a + '\n'
|
||||
# if b not in self.local_history:
|
||||
# self.local_history.append(b)
|
||||
# prompt += b + '\n'
|
||||
|
||||
# 问题
|
||||
prompt += question
|
||||
@ -101,7 +98,7 @@ class NewBingHandle(Process):
|
||||
async for final, response in self.newbing_model.ask_stream(
|
||||
prompt=question,
|
||||
conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
|
||||
wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
|
||||
wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
|
||||
):
|
||||
if not final:
|
||||
print(response)
|
||||
@ -121,14 +118,26 @@ class NewBingHandle(Process):
|
||||
self.local_history = []
|
||||
if (self.newbing_model is None) or (not self.success):
|
||||
# 代理设置
|
||||
proxies, = get_conf('proxies')
|
||||
proxies, NEWBING_COOKIES = get_conf('proxies', 'NEWBING_COOKIES')
|
||||
if proxies is None:
|
||||
self.proxies_https = None
|
||||
else:
|
||||
self.proxies_https = proxies['https']
|
||||
|
||||
if (NEWBING_COOKIES is not None) and len(NEWBING_COOKIES) > 100:
|
||||
try:
|
||||
cookies = json.loads(NEWBING_COOKIES)
|
||||
except:
|
||||
self.success = False
|
||||
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
|
||||
self.child.send(f'[Local Message] NEWBING_COOKIES未填写或有格式错误。')
|
||||
self.child.send('[Fail]'); self.child.send('[Finish]')
|
||||
raise RuntimeError(f"NEWBING_COOKIES未填写或有格式错误。")
|
||||
else:
|
||||
cookies = None
|
||||
|
||||
try:
|
||||
self.newbing_model = NewbingChatbot(proxy=self.proxies_https)
|
||||
self.newbing_model = NewbingChatbot(proxy=self.proxies_https, cookies=cookies)
|
||||
except:
|
||||
self.success = False
|
||||
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
|
||||
@ -143,7 +152,7 @@ class NewBingHandle(Process):
|
||||
asyncio.run(self.async_run())
|
||||
except Exception:
|
||||
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
|
||||
self.child.send(f'[Local Message] Newbing失败 {tb_str}.')
|
||||
self.child.send(f'[Local Message] Newbing 请求失败,报错信息如下. 如果是与网络相关的问题,建议更换代理协议(推荐http)或代理节点 {tb_str}.')
|
||||
self.child.send('[Fail]')
|
||||
self.child.send('[Finish]')
|
||||
|
||||
@ -151,18 +160,14 @@ class NewBingHandle(Process):
|
||||
"""
|
||||
这个函数运行在主进程
|
||||
"""
|
||||
self.threadLock.acquire()
|
||||
self.parent.send(kwargs) # 发送请求到子进程
|
||||
self.threadLock.acquire() # 获取线程锁
|
||||
self.parent.send(kwargs) # 请求子进程
|
||||
while True:
|
||||
res = self.parent.recv() # 等待newbing回复的片段
|
||||
if res == '[Finish]':
|
||||
break # 结束
|
||||
elif res == '[Fail]':
|
||||
self.success = False
|
||||
break
|
||||
else:
|
||||
yield res # newbing回复的片段
|
||||
self.threadLock.release()
|
||||
res = self.parent.recv() # 等待newbing回复的片段
|
||||
if res == '[Finish]': break # 结束
|
||||
elif res == '[Fail]': self.success = False; break # 失败
|
||||
else: yield res # newbing回复的片段
|
||||
self.threadLock.release() # 释放线程锁
|
||||
|
||||
|
||||
"""
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
from .bridge_newbing import preprocess_newbing_out, preprocess_newbing_out_simple
|
||||
from .bridge_newbingfree import preprocess_newbing_out, preprocess_newbing_out_simple
|
||||
from multiprocessing import Process, Pipe
|
||||
from toolbox import update_ui, get_conf, trimmed_format_exc
|
||||
import threading
|
||||
|
||||
@ -1,409 +0,0 @@
|
||||
"""
|
||||
========================================================================
|
||||
第一部分:来自EdgeGPT.py
|
||||
https://github.com/acheong08/EdgeGPT
|
||||
========================================================================
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import ssl
|
||||
import sys
|
||||
import uuid
|
||||
from enum import Enum
|
||||
from typing import Generator
|
||||
from typing import Literal
|
||||
from typing import Optional
|
||||
from typing import Union
|
||||
import websockets.client as websockets
|
||||
|
||||
DELIMITER = "\x1e"
|
||||
|
||||
|
||||
# Generate random IP between range 13.104.0.0/14
|
||||
FORWARDED_IP = (
|
||||
f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
|
||||
)
|
||||
|
||||
HEADERS = {
|
||||
"accept": "application/json",
|
||||
"accept-language": "en-US,en;q=0.9",
|
||||
"content-type": "application/json",
|
||||
"sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
|
||||
"sec-ch-ua-arch": '"x86"',
|
||||
"sec-ch-ua-bitness": '"64"',
|
||||
"sec-ch-ua-full-version": '"109.0.1518.78"',
|
||||
"sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
"sec-ch-ua-model": "",
|
||||
"sec-ch-ua-platform": '"Windows"',
|
||||
"sec-ch-ua-platform-version": '"15.0.0"',
|
||||
"sec-fetch-dest": "empty",
|
||||
"sec-fetch-mode": "cors",
|
||||
"sec-fetch-site": "same-origin",
|
||||
"x-ms-client-request-id": str(uuid.uuid4()),
|
||||
"x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32",
|
||||
"Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx",
|
||||
"Referrer-Policy": "origin-when-cross-origin",
|
||||
"x-forwarded-for": FORWARDED_IP,
|
||||
}
|
||||
|
||||
HEADERS_INIT_CONVER = {
|
||||
"authority": "edgeservices.bing.com",
|
||||
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
|
||||
"accept-language": "en-US,en;q=0.9",
|
||||
"cache-control": "max-age=0",
|
||||
"sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
|
||||
"sec-ch-ua-arch": '"x86"',
|
||||
"sec-ch-ua-bitness": '"64"',
|
||||
"sec-ch-ua-full-version": '"110.0.1587.69"',
|
||||
"sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
"sec-ch-ua-model": '""',
|
||||
"sec-ch-ua-platform": '"Windows"',
|
||||
"sec-ch-ua-platform-version": '"15.0.0"',
|
||||
"sec-fetch-dest": "document",
|
||||
"sec-fetch-mode": "navigate",
|
||||
"sec-fetch-site": "none",
|
||||
"sec-fetch-user": "?1",
|
||||
"upgrade-insecure-requests": "1",
|
||||
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69",
|
||||
"x-edge-shopping-flag": "1",
|
||||
"x-forwarded-for": FORWARDED_IP,
|
||||
}
|
||||
|
||||
def get_ssl_context():
|
||||
import certifi
|
||||
ssl_context = ssl.create_default_context()
|
||||
ssl_context.load_verify_locations(certifi.where())
|
||||
return ssl_context
|
||||
|
||||
|
||||
|
||||
class NotAllowedToAccess(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ConversationStyle(Enum):
|
||||
creative = "h3imaginative,clgalileo,gencontentv3"
|
||||
balanced = "galileo"
|
||||
precise = "h3precise,clgalileo"
|
||||
|
||||
|
||||
CONVERSATION_STYLE_TYPE = Optional[
|
||||
Union[ConversationStyle, Literal["creative", "balanced", "precise"]]
|
||||
]
|
||||
|
||||
|
||||
def _append_identifier(msg: dict) -> str:
|
||||
"""
|
||||
Appends special character to end of message to identify end of message
|
||||
"""
|
||||
# Convert dict to json string
|
||||
return json.dumps(msg) + DELIMITER
|
||||
|
||||
|
||||
def _get_ran_hex(length: int = 32) -> str:
|
||||
"""
|
||||
Returns random hex string
|
||||
"""
|
||||
return "".join(random.choice("0123456789abcdef") for _ in range(length))
|
||||
|
||||
|
||||
class _ChatHubRequest:
|
||||
"""
|
||||
Request object for ChatHub
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
conversation_signature: str,
|
||||
client_id: str,
|
||||
conversation_id: str,
|
||||
invocation_id: int = 0,
|
||||
) -> None:
|
||||
self.struct: dict = {}
|
||||
|
||||
self.client_id: str = client_id
|
||||
self.conversation_id: str = conversation_id
|
||||
self.conversation_signature: str = conversation_signature
|
||||
self.invocation_id: int = invocation_id
|
||||
|
||||
def update(
|
||||
self,
|
||||
prompt,
|
||||
conversation_style,
|
||||
options,
|
||||
) -> None:
|
||||
"""
|
||||
Updates request object
|
||||
"""
|
||||
if options is None:
|
||||
options = [
|
||||
"deepleo",
|
||||
"enable_debug_commands",
|
||||
"disable_emoji_spoken_text",
|
||||
"enablemm",
|
||||
]
|
||||
if conversation_style:
|
||||
if not isinstance(conversation_style, ConversationStyle):
|
||||
conversation_style = getattr(ConversationStyle, conversation_style)
|
||||
options = [
|
||||
"nlu_direct_response_filter",
|
||||
"deepleo",
|
||||
"disable_emoji_spoken_text",
|
||||
"responsible_ai_policy_235",
|
||||
"enablemm",
|
||||
conversation_style.value,
|
||||
"dtappid",
|
||||
"cricinfo",
|
||||
"cricinfov2",
|
||||
"dv3sugg",
|
||||
]
|
||||
self.struct = {
|
||||
"arguments": [
|
||||
{
|
||||
"source": "cib",
|
||||
"optionsSets": options,
|
||||
"sliceIds": [
|
||||
"222dtappid",
|
||||
"225cricinfo",
|
||||
"224locals0",
|
||||
],
|
||||
"traceId": _get_ran_hex(32),
|
||||
"isStartOfSession": self.invocation_id == 0,
|
||||
"message": {
|
||||
"author": "user",
|
||||
"inputMethod": "Keyboard",
|
||||
"text": prompt,
|
||||
"messageType": "Chat",
|
||||
},
|
||||
"conversationSignature": self.conversation_signature,
|
||||
"participant": {
|
||||
"id": self.client_id,
|
||||
},
|
||||
"conversationId": self.conversation_id,
|
||||
},
|
||||
],
|
||||
"invocationId": str(self.invocation_id),
|
||||
"target": "chat",
|
||||
"type": 4,
|
||||
}
|
||||
self.invocation_id += 1
|
||||
|
||||
|
||||
class _Conversation:
|
||||
"""
|
||||
Conversation API
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
cookies,
|
||||
proxy,
|
||||
) -> None:
|
||||
self.struct: dict = {
|
||||
"conversationId": None,
|
||||
"clientId": None,
|
||||
"conversationSignature": None,
|
||||
"result": {"value": "Success", "message": None},
|
||||
}
|
||||
import httpx
|
||||
self.proxy = proxy
|
||||
proxy = (
|
||||
proxy
|
||||
or os.environ.get("all_proxy")
|
||||
or os.environ.get("ALL_PROXY")
|
||||
or os.environ.get("https_proxy")
|
||||
or os.environ.get("HTTPS_PROXY")
|
||||
or None
|
||||
)
|
||||
if proxy is not None and proxy.startswith("socks5h://"):
|
||||
proxy = "socks5://" + proxy[len("socks5h://") :]
|
||||
self.session = httpx.Client(
|
||||
proxies=proxy,
|
||||
timeout=30,
|
||||
headers=HEADERS_INIT_CONVER,
|
||||
)
|
||||
for cookie in cookies:
|
||||
self.session.cookies.set(cookie["name"], cookie["value"])
|
||||
|
||||
# Send GET request
|
||||
response = self.session.get(
|
||||
url=os.environ.get("BING_PROXY_URL")
|
||||
or "https://edgeservices.bing.com/edgesvc/turing/conversation/create",
|
||||
)
|
||||
if response.status_code != 200:
|
||||
response = self.session.get(
|
||||
"https://edge.churchless.tech/edgesvc/turing/conversation/create",
|
||||
)
|
||||
if response.status_code != 200:
|
||||
print(f"Status code: {response.status_code}")
|
||||
print(response.text)
|
||||
print(response.url)
|
||||
raise Exception("Authentication failed")
|
||||
try:
|
||||
self.struct = response.json()
|
||||
except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc:
|
||||
raise Exception(
|
||||
"Authentication failed. You have not been accepted into the beta.",
|
||||
) from exc
|
||||
if self.struct["result"]["value"] == "UnauthorizedRequest":
|
||||
raise NotAllowedToAccess(self.struct["result"]["message"])
|
||||
|
||||
|
||||
class _ChatHub:
|
||||
"""
|
||||
Chat API
|
||||
"""
|
||||
|
||||
def __init__(self, conversation) -> None:
|
||||
self.wss = None
|
||||
self.request: _ChatHubRequest
|
||||
self.loop: bool
|
||||
self.task: asyncio.Task
|
||||
print(conversation.struct)
|
||||
self.request = _ChatHubRequest(
|
||||
conversation_signature=conversation.struct["conversationSignature"],
|
||||
client_id=conversation.struct["clientId"],
|
||||
conversation_id=conversation.struct["conversationId"],
|
||||
)
|
||||
|
||||
async def ask_stream(
|
||||
self,
|
||||
prompt: str,
|
||||
wss_link: str,
|
||||
conversation_style: CONVERSATION_STYLE_TYPE = None,
|
||||
raw: bool = False,
|
||||
options: dict = None,
|
||||
) -> Generator[str, None, None]:
|
||||
"""
|
||||
Ask a question to the bot
|
||||
"""
|
||||
if self.wss and not self.wss.closed:
|
||||
await self.wss.close()
|
||||
# Check if websocket is closed
|
||||
self.wss = await websockets.connect(
|
||||
wss_link,
|
||||
extra_headers=HEADERS,
|
||||
max_size=None,
|
||||
ssl=get_ssl_context()
|
||||
)
|
||||
await self._initial_handshake()
|
||||
# Construct a ChatHub request
|
||||
self.request.update(
|
||||
prompt=prompt,
|
||||
conversation_style=conversation_style,
|
||||
options=options,
|
||||
)
|
||||
# Send request
|
||||
await self.wss.send(_append_identifier(self.request.struct))
|
||||
final = False
|
||||
while not final:
|
||||
objects = str(await self.wss.recv()).split(DELIMITER)
|
||||
for obj in objects:
|
||||
if obj is None or not obj:
|
||||
continue
|
||||
response = json.loads(obj)
|
||||
if response.get("type") != 2 and raw:
|
||||
yield False, response
|
||||
elif response.get("type") == 1 and response["arguments"][0].get(
|
||||
"messages",
|
||||
):
|
||||
resp_txt = response["arguments"][0]["messages"][0]["adaptiveCards"][
|
||||
0
|
||||
]["body"][0].get("text")
|
||||
yield False, resp_txt
|
||||
elif response.get("type") == 2:
|
||||
final = True
|
||||
yield True, response
|
||||
|
||||
async def _initial_handshake(self) -> None:
|
||||
await self.wss.send(_append_identifier({"protocol": "json", "version": 1}))
|
||||
await self.wss.recv()
|
||||
|
||||
async def close(self) -> None:
|
||||
"""
|
||||
Close the connection
|
||||
"""
|
||||
if self.wss and not self.wss.closed:
|
||||
await self.wss.close()
|
||||
|
||||
|
||||
class NewbingChatbot:
|
||||
"""
|
||||
Combines everything to make it seamless
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
cookies,
|
||||
proxy
|
||||
) -> None:
|
||||
if cookies is None:
|
||||
cookies = {}
|
||||
self.cookies = cookies
|
||||
self.proxy = proxy
|
||||
self.chat_hub: _ChatHub = _ChatHub(
|
||||
_Conversation(self.cookies, self.proxy),
|
||||
)
|
||||
|
||||
async def ask(
|
||||
self,
|
||||
prompt: str,
|
||||
wss_link: str,
|
||||
conversation_style: CONVERSATION_STYLE_TYPE = None,
|
||||
options: dict = None,
|
||||
) -> dict:
|
||||
"""
|
||||
Ask a question to the bot
|
||||
"""
|
||||
async for final, response in self.chat_hub.ask_stream(
|
||||
prompt=prompt,
|
||||
conversation_style=conversation_style,
|
||||
wss_link=wss_link,
|
||||
options=options,
|
||||
):
|
||||
if final:
|
||||
return response
|
||||
await self.chat_hub.wss.close()
|
||||
return None
|
||||
|
||||
async def ask_stream(
|
||||
self,
|
||||
prompt: str,
|
||||
wss_link: str,
|
||||
conversation_style: CONVERSATION_STYLE_TYPE = None,
|
||||
raw: bool = False,
|
||||
options: dict = None,
|
||||
) -> Generator[str, None, None]:
|
||||
"""
|
||||
Ask a question to the bot
|
||||
"""
|
||||
async for response in self.chat_hub.ask_stream(
|
||||
prompt=prompt,
|
||||
conversation_style=conversation_style,
|
||||
wss_link=wss_link,
|
||||
raw=raw,
|
||||
options=options,
|
||||
):
|
||||
yield response
|
||||
|
||||
async def close(self) -> None:
|
||||
"""
|
||||
Close the connection
|
||||
"""
|
||||
await self.chat_hub.close()
|
||||
|
||||
async def reset(self) -> None:
|
||||
"""
|
||||
Reset the conversation
|
||||
"""
|
||||
await self.close()
|
||||
self.chat_hub = _ChatHub(_Conversation(self.cookies, self.proxy))
|
||||
|
||||
|
||||
@ -15,11 +15,4 @@ pymupdf
|
||||
openai
|
||||
numpy
|
||||
arxiv
|
||||
pymupdf
|
||||
pyperclip
|
||||
scikit-learn
|
||||
psutil
|
||||
distro
|
||||
python-dotenv
|
||||
rich
|
||||
Levenshtein
|
||||
rich
|
||||
235
theme.py
235
theme.py
@ -1,6 +1,6 @@
|
||||
import gradio as gr
|
||||
from toolbox import get_conf
|
||||
CODE_HIGHLIGHT, ADD_WAIFU, ADD_CHUANHU = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'ADD_CHUANHU')
|
||||
CODE_HIGHLIGHT, ADD_WAIFU = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU')
|
||||
# gradio可用颜色列表
|
||||
# gr.themes.utils.colors.slate (石板色)
|
||||
# gr.themes.utils.colors.gray (灰色)
|
||||
@ -29,185 +29,105 @@ CODE_HIGHLIGHT, ADD_WAIFU, ADD_CHUANHU = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU',
|
||||
def adjust_theme():
|
||||
|
||||
try:
|
||||
set_theme = gr.themes.Soft(
|
||||
primary_hue=gr.themes.Color(
|
||||
c50="#EBFAF2",
|
||||
c100="#CFF3E1",
|
||||
c200="#A8EAC8",
|
||||
c300="#77DEA9",
|
||||
c400="#3FD086",
|
||||
c500="#02C160",
|
||||
c600="#06AE56",
|
||||
c700="#05974E",
|
||||
c800="#057F45",
|
||||
c900="#04673D",
|
||||
c950="#2E5541",
|
||||
name="small_and_beautiful",
|
||||
),
|
||||
secondary_hue=gr.themes.Color(
|
||||
c50="#576b95",
|
||||
c100="#576b95",
|
||||
c200="#576b95",
|
||||
c300="#576b95",
|
||||
c400="#576b95",
|
||||
c500="#576b95",
|
||||
c600="#576b95",
|
||||
c700="#576b95",
|
||||
c800="#576b95",
|
||||
c900="#576b95",
|
||||
c950="#576b95",
|
||||
),
|
||||
neutral_hue=gr.themes.Color(
|
||||
name="gray",
|
||||
c50="#f6f7f8",
|
||||
# c100="#f3f4f6",
|
||||
c100="#F2F2F2",
|
||||
c200="#e5e7eb",
|
||||
c300="#d1d5db",
|
||||
c400="#B2B2B2",
|
||||
c500="#808080",
|
||||
c600="#636363",
|
||||
c700="#515151",
|
||||
c800="#393939",
|
||||
# c900="#272727",
|
||||
c900="#2B2B2B",
|
||||
c950="#171717",
|
||||
),
|
||||
|
||||
radius_size=gr.themes.sizes.radius_sm,
|
||||
).set(
|
||||
button_primary_background_fill="*primary_500",
|
||||
button_primary_background_fill_dark="*primary_600",
|
||||
button_primary_background_fill_hover="*primary_400",
|
||||
button_primary_border_color="*primary_500",
|
||||
button_primary_border_color_dark="*primary_600",
|
||||
button_primary_text_color="wihte",
|
||||
button_primary_text_color_dark="white",
|
||||
button_secondary_background_fill="*neutral_100",
|
||||
button_secondary_background_fill_hover="*neutral_50",
|
||||
button_secondary_background_fill_dark="*neutral_900",
|
||||
button_secondary_text_color="*neutral_800",
|
||||
button_secondary_text_color_dark="white",
|
||||
background_fill_primary="#F7F7F7",
|
||||
background_fill_primary_dark="#1F1F1F",
|
||||
block_title_text_color="*primary_500",
|
||||
block_title_background_fill_dark="*primary_900",
|
||||
block_label_background_fill_dark="*primary_900",
|
||||
input_background_fill="#F6F6F6",
|
||||
chatbot_code_background_color="*neutral_950",
|
||||
chatbot_code_background_color_dark="*neutral_950",
|
||||
color_er = gr.themes.utils.colors.fuchsia
|
||||
set_theme = gr.themes.Default(
|
||||
primary_hue=gr.themes.utils.colors.orange,
|
||||
neutral_hue=gr.themes.utils.colors.gray,
|
||||
font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
|
||||
"sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
|
||||
font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
|
||||
set_theme.set(
|
||||
# Colors
|
||||
input_background_fill_dark="*neutral_800",
|
||||
# Transition
|
||||
button_transition="none",
|
||||
# Shadows
|
||||
button_shadow="*shadow_drop",
|
||||
button_shadow_hover="*shadow_drop_lg",
|
||||
button_shadow_active="*shadow_inset",
|
||||
input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
|
||||
input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
|
||||
input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
|
||||
checkbox_label_shadow="*shadow_drop",
|
||||
block_shadow="*shadow_drop",
|
||||
form_gap_width="1px",
|
||||
# Button borders
|
||||
input_border_width="1px",
|
||||
input_background_fill="white",
|
||||
# Gradients
|
||||
stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
|
||||
stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
|
||||
error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
|
||||
error_background_fill_dark="*background_fill_primary",
|
||||
checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
|
||||
checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
|
||||
checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
|
||||
checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
|
||||
button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
|
||||
button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
|
||||
button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
|
||||
button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
|
||||
button_primary_border_color_dark="*primary_500",
|
||||
button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
|
||||
button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
|
||||
button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
|
||||
button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
|
||||
button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
|
||||
button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
|
||||
button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
|
||||
button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
|
||||
button_cancel_border_color=color_er.c200,
|
||||
button_cancel_border_color_dark=color_er.c600,
|
||||
button_cancel_text_color=color_er.c600,
|
||||
button_cancel_text_color_dark="white",
|
||||
)
|
||||
js = ''
|
||||
if ADD_CHUANHU:
|
||||
with open("./docs/assets/custom.js", "r", encoding="utf-8") as f, \
|
||||
open("./docs/assets/external-scripts.js", "r", encoding="utf-8") as f1:
|
||||
customJS = f.read()
|
||||
externalScripts = f1.read()
|
||||
js += f'<script>{customJS}</script><script async>{externalScripts}</script>'
|
||||
|
||||
# 添加一个萌萌的看板娘
|
||||
if ADD_WAIFU:
|
||||
js += """
|
||||
js = """
|
||||
<script src="file=docs/waifu_plugin/jquery.min.js"></script>
|
||||
<script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
|
||||
<script src="file=docs/waifu_plugin/autoload.js"></script>
|
||||
"""
|
||||
gradio_original_template_fn = gr.routes.templates.TemplateResponse
|
||||
def gradio_new_template_fn(*args, **kwargs):
|
||||
res = gradio_original_template_fn(*args, **kwargs)
|
||||
res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
|
||||
res.init_headers()
|
||||
return res
|
||||
gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
|
||||
gradio_original_template_fn = gr.routes.templates.TemplateResponse
|
||||
def gradio_new_template_fn(*args, **kwargs):
|
||||
res = gradio_original_template_fn(*args, **kwargs)
|
||||
res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
|
||||
res.init_headers()
|
||||
return res
|
||||
gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
|
||||
except:
|
||||
set_theme = None
|
||||
print('gradio版本较旧, 不能自定义字体和颜色')
|
||||
return set_theme
|
||||
|
||||
|
||||
with open("docs/assets/custom.css", "r", encoding="utf-8") as f:
|
||||
customCSS = f.read()
|
||||
custom_css = customCSS
|
||||
advanced_css = """
|
||||
#debug_mes {
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
width: 100%;
|
||||
z-index: 1; /* 设置更高的 z-index 值 */
|
||||
margin-bottom: 10px !important;
|
||||
}
|
||||
#chat_txt {
|
||||
display: flex;
|
||||
flex-direction: column-reverse;
|
||||
overflow-y: auto !important;
|
||||
z-index: 3;
|
||||
flex-grow: 1; /* 自动填充剩余空间 */
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
width: 100%;
|
||||
margin-bottom: 35px !important;
|
||||
}
|
||||
#sm_btn {
|
||||
display: flex;
|
||||
flex-wrap: unset !important;
|
||||
gap: 5px !important;
|
||||
width: var(--size-full);
|
||||
}
|
||||
textarea {
|
||||
resize: none;
|
||||
height: 100%; /* 填充父元素的高度 */
|
||||
}
|
||||
#main_chatbot {
|
||||
height: 75vh !important;
|
||||
max-height: 75vh !important;
|
||||
/* overflow: auto !important; */
|
||||
z-index: 2;
|
||||
}
|
||||
#prompt_result{
|
||||
height: 60vh !important;
|
||||
max-height: 60vh !important;
|
||||
}
|
||||
.wrap.svelte-18telvq.svelte-18telvq {
|
||||
padding: var(--block-padding) !important;
|
||||
height: 100% !important;
|
||||
max-height: 95% !important;
|
||||
overflow-y: auto !important;
|
||||
}
|
||||
.app.svelte-1mya07g.svelte-1mya07g {
|
||||
max-width: 100%;
|
||||
position: relative;
|
||||
/* margin: auto; */
|
||||
padding: var(--size-4);
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
}
|
||||
.md-message table {
|
||||
.markdown-body table {
|
||||
margin: 1em 0;
|
||||
border-collapse: collapse;
|
||||
empty-cells: show;
|
||||
}
|
||||
|
||||
.md-message th, .md-message td {
|
||||
.markdown-body th, .markdown-body td {
|
||||
border: 1.2px solid var(--border-color-primary);
|
||||
padding: 5px;
|
||||
}
|
||||
|
||||
.md-message thead {
|
||||
.markdown-body thead {
|
||||
background-color: rgba(175,184,193,0.2);
|
||||
}
|
||||
|
||||
.md-message thead th {
|
||||
.markdown-body thead th {
|
||||
padding: .5em .2em;
|
||||
}
|
||||
|
||||
.md-message ol, .md-message ul {
|
||||
.markdown-body ol, .markdown-body ul {
|
||||
padding-inline-start: 2em !important;
|
||||
}
|
||||
|
||||
/* chat box. */
|
||||
[class *= "message"] {
|
||||
gap: 7px !important;
|
||||
border-radius: var(--radius-xl) !important;
|
||||
/* padding: var(--spacing-xl) !important; */
|
||||
/* font-size: var(--text-md) !important; */
|
||||
@ -217,40 +137,27 @@ textarea {
|
||||
}
|
||||
[data-testid = "bot"] {
|
||||
max-width: 95%;
|
||||
letter-spacing: 0.5px;
|
||||
font-weight: normal;
|
||||
/* width: auto !important; */
|
||||
border-bottom-left-radius: 0 !important;
|
||||
}
|
||||
|
||||
.dark [data-testid = "bot"] {
|
||||
max-width: 95%;
|
||||
color: #ccd2db !important;
|
||||
letter-spacing: 0.5px;
|
||||
font-weight: normal;
|
||||
/* width: auto !important; */
|
||||
border-bottom-left-radius: 0 !important;
|
||||
}
|
||||
|
||||
[data-testid = "user"] {
|
||||
max-width: 100%;
|
||||
letter-spacing: 0.5px;
|
||||
/* width: auto !important; */
|
||||
border-bottom-right-radius: 0 !important;
|
||||
}
|
||||
|
||||
/* linein code block. */
|
||||
.md-message code {
|
||||
.markdown-body code {
|
||||
display: inline;
|
||||
white-space: break-spaces;
|
||||
border-radius: 6px;
|
||||
margin: 0 2px 0 2px;
|
||||
padding: .2em .4em .1em .4em;
|
||||
background-color: rgba(13, 17, 23, 0.95);
|
||||
color: #eff0f2;
|
||||
color: #c9d1d9;
|
||||
}
|
||||
|
||||
.dark .md-message code {
|
||||
.dark .markdown-body code {
|
||||
display: inline;
|
||||
white-space: break-spaces;
|
||||
border-radius: 6px;
|
||||
@ -260,7 +167,7 @@ textarea {
|
||||
}
|
||||
|
||||
/* code block css */
|
||||
.md-message pre code {
|
||||
.markdown-body pre code {
|
||||
display: block;
|
||||
overflow: auto;
|
||||
white-space: pre;
|
||||
@ -270,7 +177,7 @@ textarea {
|
||||
margin: 1em 2em 1em 0.5em;
|
||||
}
|
||||
|
||||
.dark .md-message pre code {
|
||||
.dark .markdown-body pre code {
|
||||
display: block;
|
||||
overflow: auto;
|
||||
white-space: pre;
|
||||
|
||||
249
toolbox.py
249
toolbox.py
@ -1,18 +1,11 @@
|
||||
import html
|
||||
import markdown
|
||||
import importlib
|
||||
import time
|
||||
import inspect
|
||||
import gradio as gr
|
||||
import func_box
|
||||
import re
|
||||
import os
|
||||
from latex2mathml.converter import convert as tex2mathml
|
||||
from functools import wraps, lru_cache
|
||||
import shutil
|
||||
import os
|
||||
import time
|
||||
import glob
|
||||
import sys
|
||||
import threading
|
||||
############################### 插件输入输出接驳区 #######################################
|
||||
pj = os.path.join
|
||||
|
||||
"""
|
||||
@ -47,62 +40,36 @@ def ArgsGeneralWrapper(f):
|
||||
"""
|
||||
装饰器函数,用于重组输入参数,改变输入参数的顺序与结构。
|
||||
"""
|
||||
def decorated(cookies, max_length, llm_model, txt, top_p, temperature,
|
||||
chatbot, history, system_prompt, models, plugin_advanced_arg, ipaddr: gr.Request, *args):
|
||||
""""""
|
||||
def decorated(cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args):
|
||||
txt_passon = txt
|
||||
if txt == "" and txt2 != "": txt_passon = txt2
|
||||
# 引入一个有cookie的chatbot
|
||||
start_time = time.time()
|
||||
encrypt, private = get_conf('switch_model')[0]['key']
|
||||
private_key, = get_conf('private_key')
|
||||
cookies.update({
|
||||
'top_p':top_p,
|
||||
'temperature':temperature,
|
||||
})
|
||||
|
||||
llm_kwargs = {
|
||||
'api_key': cookies['api_key'],
|
||||
'llm_model': llm_model,
|
||||
'top_p':top_p,
|
||||
'max_length': max_length,
|
||||
'temperature': temperature,
|
||||
'ipaddr': ipaddr.client.host,
|
||||
'start_time': start_time
|
||||
'temperature':temperature,
|
||||
}
|
||||
plugin_kwargs = {
|
||||
"advanced_arg": plugin_advanced_arg,
|
||||
"parameters_def": ''
|
||||
}
|
||||
if len(args) > 1:
|
||||
plugin_kwargs.update({'parameters_def': args[1]})
|
||||
transparent_address_private = f'<p style="display:none;">\n{private_key}\n{ipaddr.client.host}\n</p>'
|
||||
transparent_address = f'<p style="display:none;">\n{ipaddr.client.host}\n</p>'
|
||||
if private in models:
|
||||
if chatbot == []:
|
||||
chatbot.append([None, f'隐私模式, 你的对话记录无法被他人检索 {transparent_address_private}'])
|
||||
else:
|
||||
chatbot[0] = [None, f'隐私模式, 你的对话记录无法被他人检索 {transparent_address_private}']
|
||||
else:
|
||||
if chatbot == []:
|
||||
chatbot.append([None, f'正常对话模式, 你接来下的对话将会被记录并且可以被所有人检索,你可以到Settings中选择隐私模式 {transparent_address}'])
|
||||
else:
|
||||
chatbot[0] = [None, f'正常对话模式, 你接来下的对话将会被记录并且可以被所有人检索,你可以到Settings中选择隐私模式 {transparent_address}']
|
||||
chatbot_with_cookie = ChatBotWithCookies(cookies)
|
||||
chatbot_with_cookie.write_list(chatbot)
|
||||
txt_passon = txt
|
||||
if encrypt in models: txt_passon = func_box.encryption_str(txt)
|
||||
yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
|
||||
return decorated
|
||||
|
||||
|
||||
|
||||
def update_ui(chatbot, history, msg='正常', *args): # 刷新界面
|
||||
def update_ui(chatbot, history, msg='正常', **kwargs): # 刷新界面
|
||||
"""
|
||||
刷新用户界面
|
||||
"""
|
||||
assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时,可用clear将其清空,然后用for+append循环重新赋值。"
|
||||
yield chatbot.get_cookies(), chatbot, history, msg
|
||||
threading.Thread(target=func_box.thread_write_chat, args=(chatbot, history)).start()
|
||||
# func_box.thread_write_chat(chatbot, history)
|
||||
|
||||
def update_ui_lastest_msg(lastmsg, chatbot, history, delay=1): # 刷新界面
|
||||
"""
|
||||
@ -159,14 +126,9 @@ def HotReload(f):
|
||||
def decorated(*args, **kwargs):
|
||||
fn_name = f.__name__
|
||||
f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
|
||||
try:
|
||||
yield from f_hot_reload(*args, **kwargs)
|
||||
except TypeError:
|
||||
args = tuple(args[element] for element in range(len(args)) if element != 6)
|
||||
yield from f_hot_reload(*args, **kwargs)
|
||||
yield from f_hot_reload(*args, **kwargs)
|
||||
return decorated
|
||||
|
||||
####################################### 其他小工具 #####################################
|
||||
|
||||
"""
|
||||
========================================================================
|
||||
@ -230,7 +192,8 @@ def write_results_to_file(history, file_name=None):
|
||||
# remove everything that cannot be handled by utf8
|
||||
f.write(content.encode('utf-8', 'ignore').decode())
|
||||
f.write('\n\n')
|
||||
res = '以上材料已经被写入' + f'./gpt_log/{file_name}'
|
||||
res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
|
||||
print(res)
|
||||
return res
|
||||
|
||||
|
||||
@ -255,51 +218,37 @@ def report_execption(chatbot, history, a, b):
|
||||
history.append(b)
|
||||
|
||||
|
||||
import re
|
||||
def text_divide_paragraph(input_str):
|
||||
if input_str:
|
||||
code_blocks = re.findall(r'```[\s\S]*?```', input_str)
|
||||
def text_divide_paragraph(text):
|
||||
"""
|
||||
将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
|
||||
"""
|
||||
pre = '<div class="markdown-body">'
|
||||
suf = '</div>'
|
||||
if text.startswith(pre) and text.endswith(suf):
|
||||
return text
|
||||
|
||||
if '```' in text:
|
||||
# careful input
|
||||
return pre + text + suf
|
||||
else:
|
||||
# wtf input
|
||||
lines = text.split("\n")
|
||||
for i, line in enumerate(lines):
|
||||
lines[i] = lines[i].replace(" ", " ")
|
||||
text = "</br>".join(lines)
|
||||
return pre + text + suf
|
||||
|
||||
for i, block in enumerate(code_blocks):
|
||||
input_str = input_str.replace(block, f'{{{{CODE_BLOCK_{i}}}}}')
|
||||
|
||||
if code_blocks:
|
||||
sections = re.split(r'({{{{\w+}}}})', input_str)
|
||||
for idx, section in enumerate(sections):
|
||||
if 'CODE_BLOCK' in section or section.startswith(' '):
|
||||
continue
|
||||
sections[idx] = re.sub(r'(?!```)(?<!\n)\n(?!(\n|^)( {0,3}[\*\+\-]|[0-9]+\.))', '\n\n', section)
|
||||
input_str = ''.join(sections)
|
||||
|
||||
for i, block in enumerate(code_blocks):
|
||||
input_str = input_str.replace(f'{{{{CODE_BLOCK_{i}}}}}', block.replace('\n', '\n'))
|
||||
else:
|
||||
lines = input_str.split('\n')
|
||||
for idx, line in enumerate(lines[:-1]):
|
||||
if not line.strip():
|
||||
continue
|
||||
if not (lines[idx + 1].startswith(' ') or lines[idx + 1].startswith('\t')):
|
||||
lines[idx] += '\n' # 将一个换行符替换为两个换行符
|
||||
input_str = '\n'.join(lines)
|
||||
|
||||
return input_str
|
||||
|
||||
|
||||
@lru_cache(maxsize=128) # 使用 lru缓存 加快转换速度
|
||||
@lru_cache(maxsize=128) # 使用 lru缓存 加快转换速度
|
||||
def markdown_convertion(txt):
|
||||
"""
|
||||
将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
|
||||
"""
|
||||
pre = '<div class="md-message">'
|
||||
pre = '<div class="markdown-body">'
|
||||
suf = '</div>'
|
||||
raw_pre = '<div class="raw-message hideM">'
|
||||
raw_suf = '</div>'
|
||||
if txt.startswith(pre) and txt.endswith(suf):
|
||||
# print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
|
||||
return txt # 已经被转化过,不需要再次转化
|
||||
if txt.startswith(raw_pre) and txt.endswith(raw_suf):
|
||||
return txt # 已经被转化过,不需要再次转化
|
||||
raw_hide = raw_pre + txt + raw_suf
|
||||
return txt # 已经被转化过,不需要再次转化
|
||||
|
||||
markdown_extension_configs = {
|
||||
'mdx_math': {
|
||||
'enable_dollar_delimiter': True,
|
||||
@ -308,6 +257,13 @@ def markdown_convertion(txt):
|
||||
}
|
||||
find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
|
||||
|
||||
def tex2mathml_catch_exception(content, *args, **kwargs):
|
||||
try:
|
||||
content = tex2mathml(content, *args, **kwargs)
|
||||
except:
|
||||
content = content
|
||||
return content
|
||||
|
||||
def replace_math_no_render(match):
|
||||
content = match.group(1)
|
||||
if 'mode=display' in match.group(0):
|
||||
@ -323,47 +279,40 @@ def markdown_convertion(txt):
|
||||
content = content.replace('\\begin{aligned}', '\\begin{array}')
|
||||
content = content.replace('\\end{aligned}', '\\end{array}')
|
||||
content = content.replace('&', ' ')
|
||||
content = tex2mathml(content, display="block")
|
||||
content = tex2mathml_catch_exception(content, display="block")
|
||||
return content
|
||||
else:
|
||||
return tex2mathml(content)
|
||||
return tex2mathml_catch_exception(content)
|
||||
|
||||
def markdown_bug_hunt(content):
|
||||
"""
|
||||
解决一个mdx_math的bug(单$包裹begin命令时多余<script>)
|
||||
"""
|
||||
content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">',
|
||||
'<script type="math/tex; mode=display">')
|
||||
content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
|
||||
content = content.replace('</script>\n</script>', '</script>')
|
||||
return content
|
||||
|
||||
def no_code(txt):
|
||||
if '```' not in txt:
|
||||
if '```' not in txt:
|
||||
return True
|
||||
else:
|
||||
if '```reference' in txt:
|
||||
return True # newbing
|
||||
else:
|
||||
return False
|
||||
if '```reference' in txt: return True # newbing
|
||||
else: return False
|
||||
|
||||
if ('$$' in txt) and no_code(txt): # 有$标识的公式符号,且没有代码段```的标识
|
||||
if ('$' in txt) and no_code(txt): # 有$标识的公式符号,且没有代码段```的标识
|
||||
# convert everything to html format
|
||||
split = markdown.markdown(text='---')
|
||||
txt = re.sub(r'\$\$((?:.|\n)*?)\$\$', lambda match: '$$' + re.sub(r'\n+', '</br>', match.group(1)) + '$$', txt)
|
||||
convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
|
||||
convert_stage_1 = markdown_bug_hunt(convert_stage_1)
|
||||
# re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
|
||||
# 1. convert to easy-to-copy tex (do not render math)
|
||||
convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
|
||||
# 2. convert to rendered equation
|
||||
convert_stage_1_resp = convert_stage_1.replace('</br>', '')
|
||||
convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1_resp, flags=re.DOTALL)
|
||||
convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
|
||||
# cat them together
|
||||
context = pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
|
||||
return raw_hide + context # 破坏html 结构,并显示源码
|
||||
return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
|
||||
else:
|
||||
context = pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
|
||||
return raw_hide + context # 破坏html 结构,并显示源码
|
||||
return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
|
||||
|
||||
|
||||
def close_up_code_segment_during_stream(gpt_reply):
|
||||
@ -377,9 +326,9 @@ def close_up_code_segment_during_stream(gpt_reply):
|
||||
str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。
|
||||
|
||||
"""
|
||||
if '```' not in str(gpt_reply):
|
||||
if '```' not in gpt_reply:
|
||||
return gpt_reply
|
||||
if str(gpt_reply).endswith('```'):
|
||||
if gpt_reply.endswith('```'):
|
||||
return gpt_reply
|
||||
|
||||
# 排除了以上两个情况,我们
|
||||
@ -405,8 +354,7 @@ def format_io(self, y):
|
||||
if gpt_reply is not None: gpt_reply = close_up_code_segment_during_stream(gpt_reply)
|
||||
# process
|
||||
y[-1] = (
|
||||
# None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']),
|
||||
None if i_ask is None else markdown_convertion(i_ask),
|
||||
None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']),
|
||||
None if gpt_reply is None else markdown_convertion(gpt_reply)
|
||||
)
|
||||
return y
|
||||
@ -504,51 +452,42 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
|
||||
else: current = []
|
||||
chatbot._cookies.update({'file_to_promote': [new_path] + current})
|
||||
|
||||
|
||||
def get_user_upload(chatbot, ipaddr: gr.Request):
|
||||
"""
|
||||
获取用户上传过的文件
|
||||
"""
|
||||
private_upload = './private_upload'
|
||||
user_history = os.path.join(private_upload, ipaddr.client.host)
|
||||
history = """| 编号 | 目录 | 目录内文件 |\n| --- | --- | --- |\n"""
|
||||
count_num = 1
|
||||
for root, d, file in os.walk(user_history):
|
||||
file_link = "<br>".join([f'{func_box.html_view_blank(f"{root}/{i}")}' for i in file])
|
||||
history += f'| {count_num} | {root} | {file_link} |\n'
|
||||
count_num += 1
|
||||
chatbot.append(['Load Submission History....',
|
||||
f'[Local Message] 请自行复制以下目录 or 目录+文件, 填入输入框以供函数区高亮按钮使用\n\n'
|
||||
f'{func_box.html_tag_color("提交前记得请检查头尾空格哦~")}\n\n'
|
||||
f'{history}'
|
||||
])
|
||||
return chatbot
|
||||
|
||||
|
||||
def on_file_uploaded(files, chatbot, txt, ipaddr: gr.Request):
|
||||
def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
|
||||
"""
|
||||
当文件被上传时的回调函数
|
||||
"""
|
||||
if len(files) == 0:
|
||||
return chatbot, txt
|
||||
private_upload = './private_upload'
|
||||
# shutil.rmtree('./private_upload/') 不需要删除文件
|
||||
time_tag_path = os.path.join(private_upload, ipaddr.client.host, time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()))
|
||||
os.makedirs(f'{time_tag_path}', exist_ok=True)
|
||||
import shutil
|
||||
import os
|
||||
import time
|
||||
import glob
|
||||
from toolbox import extract_archive
|
||||
try:
|
||||
shutil.rmtree('./private_upload/')
|
||||
except:
|
||||
pass
|
||||
time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
|
||||
os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
|
||||
err_msg = ''
|
||||
for file in files:
|
||||
file_origin_name = os.path.basename(file.orig_name)
|
||||
shutil.copy(file.name, f'{time_tag_path}/{file_origin_name}')
|
||||
err_msg += extract_archive(f'{time_tag_path}/{file_origin_name}',
|
||||
dest_dir=f'{time_tag_path}/{file_origin_name}.extract')
|
||||
moved_files = [fp for fp in glob.glob(f'{time_tag_path}/**/*', recursive=True)]
|
||||
txt = f'{time_tag_path}'
|
||||
shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
|
||||
err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
|
||||
dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract')
|
||||
moved_files = [fp for fp in glob.glob('private_upload/**/*', recursive=True)]
|
||||
if "底部输入区" in checkboxes:
|
||||
txt = ""
|
||||
txt2 = f'private_upload/{time_tag}'
|
||||
else:
|
||||
txt = f'private_upload/{time_tag}'
|
||||
txt2 = ""
|
||||
moved_files_str = '\t\n\n'.join(moved_files)
|
||||
chatbot.append([None,
|
||||
chatbot.append(['我上传了文件,请查收',
|
||||
f'[Local Message] 收到以下文件: \n\n{moved_files_str}' +
|
||||
f'\n\n调用路径参数已自动修正到: \n\n{txt}' +
|
||||
f'\n\n现在您点击任意“高亮”标识的函数插件时,以上文件将被作为输入参数'+err_msg])
|
||||
return chatbot, txt
|
||||
f'\n\n现在您点击任意“红颜色”标识的函数插件时,以上文件将被作为输入参数'+err_msg])
|
||||
return chatbot, txt, txt2
|
||||
|
||||
|
||||
def on_report_generated(cookies, files, chatbot):
|
||||
@ -577,13 +516,6 @@ def is_api2d_key(key):
|
||||
else:
|
||||
return False
|
||||
|
||||
def is_proxy_key(key):
|
||||
if key.startswith('proxy-') and len(key) == 38:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def is_any_api_key(key):
|
||||
if ',' in key:
|
||||
keys = key.split(',')
|
||||
@ -591,7 +523,7 @@ def is_any_api_key(key):
|
||||
if is_any_api_key(k): return True
|
||||
return False
|
||||
else:
|
||||
return is_openai_api_key(key) or is_api2d_key(key) or is_proxy_key(key)
|
||||
return is_openai_api_key(key) or is_api2d_key(key)
|
||||
|
||||
def what_keys(keys):
|
||||
avail_key_list = {'OpenAI Key':0, "API2D Key":0}
|
||||
@ -605,14 +537,7 @@ def what_keys(keys):
|
||||
if is_api2d_key(k):
|
||||
avail_key_list['API2D Key'] += 1
|
||||
|
||||
for k in key_list:
|
||||
if is_proxy_key(k):
|
||||
avail_key_list['Proxy Key'] += 1
|
||||
|
||||
return f"检测到: \n" \
|
||||
f"OpenAI Key {avail_key_list['OpenAI Key']} 个\n" \
|
||||
f"API2D Key {avail_key_list['API2D Key']} 个\n" \
|
||||
f"Proxy Key {avail_key_list['API2D Key']} 个\n"
|
||||
return f"检测到: OpenAI Key {avail_key_list['OpenAI Key']} 个,API2D Key {avail_key_list['API2D Key']} 个"
|
||||
|
||||
def select_api_key(keys, llm_model):
|
||||
import random
|
||||
@ -627,10 +552,6 @@ def select_api_key(keys, llm_model):
|
||||
for k in key_list:
|
||||
if is_api2d_key(k): avail_key_list.append(k)
|
||||
|
||||
if llm_model.startswith('proxy-'):
|
||||
for k in key_list:
|
||||
if is_proxy_key(k): avail_key_list.append(k.replace('proxy-', ''))
|
||||
|
||||
if len(avail_key_list) == 0:
|
||||
raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源。")
|
||||
|
||||
@ -701,12 +622,6 @@ def read_single_conf_with_lru_cache(arg):
|
||||
except:
|
||||
try:
|
||||
# 优先级2. 获取config_private中的配置
|
||||
# 获取当前文件所在目录的路径
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
# 获取上一层目录的路径
|
||||
parent_dir = os.path.dirname(current_dir)
|
||||
# 将上一层目录添加到Python的搜索路径中
|
||||
sys.path.append(parent_dir)
|
||||
r = getattr(importlib.import_module('config_private'), arg)
|
||||
except:
|
||||
# 优先级3. 获取config中的配置
|
||||
@ -761,7 +676,6 @@ class DummyWith():
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
return
|
||||
|
||||
|
||||
def run_gradio_in_subpath(demo, auth, port, custom_path):
|
||||
"""
|
||||
把gradio的运行地址更改到指定的二次路径上
|
||||
@ -928,3 +842,4 @@ def objload(file='objdump.tmp'):
|
||||
return
|
||||
with open(file, 'rb') as f:
|
||||
return pickle.load(f)
|
||||
|
||||
4
version
4
version
@ -1,5 +1,5 @@
|
||||
{
|
||||
"version": 3.42,
|
||||
"version": 3.43,
|
||||
"show_feature": true,
|
||||
"new_feature": "完善本地Latex矫错和翻译功能 <-> 增加gpt-3.5-16k的支持 <-> 新增最强Arxiv论文翻译插件 <-> 修复gradio复制按钮BUG <-> 修复PDF翻译的BUG, 新增HTML中英双栏对照 <-> 添加了OpenAI图片生成插件 <-> 添加了OpenAI音频转文本总结插件 <-> 通过Slack添加对Claude的支持"
|
||||
"new_feature": "修复Azure接口的BUG <-> 完善多语言模块 <-> 完善本地Latex矫错和翻译功能 <-> 增加gpt-3.5-16k的支持 <-> 新增最强Arxiv论文翻译插件 <-> 修复gradio复制按钮BUG <-> 修复PDF翻译的BUG, 新增HTML中英双栏对照 <-> 添加了OpenAI图片生成插件"
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user