Compare commits

..

4 Commits

SHA1 Message Date
c1680076fa Adapt the TOP-DOWN layout 2023-07-08 02:13:26 +08:00
0fc1a32b73 Fix the status-bar jumping problem 2023-07-08 02:07:25 +08:00
05376c1428 auto chatbot height 2023-07-08 01:21:59 +08:00
bcbcfdc52c Debug the multi-turn interaction interface 2023-07-06 23:56:55 +08:00
13 changed files with 296 additions and 121 deletions

View File

@@ -70,7 +70,7 @@ MAX_RETRY = 2

 # Model selection (note: LLM_MODEL is the default model and it *must* be included in the AVAIL_LLM_MODELS list)
 LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt35", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
 # P.S. other available models include ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]

@@ -109,10 +109,10 @@ SLACK_CLAUDE_USER_TOKEN = ''

 # To use AZURE, see the extra document docs\use_azure.md for details
-AZURE_ENDPOINT = "https://你亲手写的api名称.openai.azure.com/"
+AZURE_ENDPOINT = "https://你的api名称.openai.azure.com/"
 AZURE_API_KEY = "填入azure openai api的密钥"
-AZURE_API_VERSION = "2023-05-15" # usually no need to change
-AZURE_ENGINE = "填入你亲手写的部署名" # see docs\use_azure.md
+AZURE_API_VERSION = "填入api版本"
+AZURE_ENGINE = "填入ENGINE"

 # Use Newbing
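As the comment above stresses, LLM_MODEL must appear in AVAIL_LLM_MODELS. A one-line sanity check (illustrative only, not part of the repo) makes the constraint explicit, and would catch the rename from "azure-gpt-3.5" to "azure-gpt35" drifting out of sync with the model table changed later in this diff:

```
assert LLM_MODEL in AVAIL_LLM_MODELS, f"LLM_MODEL={LLM_MODEL!r} is not in AVAIL_LLM_MODELS"
```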

View File

@@ -353,6 +353,18 @@ def get_crazy_functions():
         except:
             print('Load function plugin failed')

+    try:
+        from crazy_functions.交互功能函数模板 import 交互功能模板函数
+        function_plugins.update({
+            "交互功能模板函数": {
+                "Color": "stop",
+                # "AsButton": False,
+                "Function": HotReload(交互功能模板函数)
+            }
+        })
+    except:
+        print('Load function plugin failed')
+
     try:
         from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
         function_plugins.update({
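Each plugin is registered inside its own try/except so a single broken import cannot block the rest from loading. For readers unfamiliar with `HotReload`, here is a minimal sketch of what such a wrapper can look like, assuming it simply re-imports the plugin's module before every call (the repo's actual implementation may differ in detail):

```
from functools import wraps
import importlib

def hot_reload_sketch(f):
    # re-import f's defining module on each call, so edits to a plugin
    # take effect without restarting the application
    @wraps(f)
    def decorated(*args, **kwargs):
        module = importlib.reload(importlib.import_module(f.__module__))
        fresh_f = getattr(module, f.__name__)
        yield from fresh_f(*args, **kwargs)
    return decorated
```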

View File

@@ -130,11 +130,6 @@ def request_gpt_model_in_new_thread_with_ui_alive(
         yield from update_ui(chatbot=chatbot, history=[]) # if it finally succeeded, remove the error message
     return final_result

-def can_multi_process(llm):
-    if llm.startswith('gpt-'): return True
-    if llm.startswith('api2d-'): return True
-    if llm.startswith('azure-'): return True
-    return False

 def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
         inputs_array, inputs_show_user_array, llm_kwargs,
@@ -180,7 +175,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
     except: max_workers = 8
     if max_workers <= 0: max_workers = 3
     # block multithreading for chatglm; it may cause severe stalls
-    if not can_multi_process(llm_kwargs['llm_model']):
+    if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-')):
         max_workers = 1

     executor = ThreadPoolExecutor(max_workers=max_workers)
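One behavioral consequence of deleting `can_multi_process` is worth spelling out: `azure-` models, which the helper explicitly allowed, now fall through to the single-worker branch. A small illustrative sketch (the function name is mine, not the repo's):

```
def effective_workers(llm_model: str, requested: int) -> int:
    # inlined gate, mirroring the new code: only gpt-/api2d- keep threads
    if not (llm_model.startswith('gpt-') or llm_model.startswith('api2d-')):
        return 1   # e.g. chatglm, and now also azure-* models
    return max(requested, 1)

assert effective_workers('gpt-3.5-turbo', 8) == 8
assert effective_workers('azure-gpt35', 8) == 1   # was 8 before this change
```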

View File

@@ -189,18 +189,6 @@ def rm_comments(main_file):
     main_file = re.sub(r'(?<!\\)%.*', '', main_file)  # use a regex to find inline comments and replace them with the empty string
     return main_file

-def find_tex_file_ignore_case(fp):
-    dir_name = os.path.dirname(fp)
-    base_name = os.path.basename(fp)
-    if not base_name.endswith('.tex'): base_name+='.tex'
-    if os.path.exists(pj(dir_name, base_name)): return pj(dir_name, base_name)
-    # go case in-sensitive
-    import glob
-    for f in glob.glob(dir_name+'/*.tex'):
-        base_name_s = os.path.basename(fp)
-        if base_name_s.lower() == base_name.lower(): return f
-    return None

 def merge_tex_files_(project_foler, main_file, mode):
     """
     Merge Tex project recursively
@@ -209,11 +197,15 @@ def merge_tex_files_(project_foler, main_file, mode):
     for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
         f = s.group(1)
         fp = os.path.join(project_foler, f)
-        fp = find_tex_file_ignore_case(fp)
-        if fp:
-            with open(fp, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
+        if os.path.exists(fp):
+            # e.g., \input{srcs/07_appendix.tex}
+            with open(fp, 'r', encoding='utf-8', errors='replace') as fx:
+                c = fx.read()
         else:
-            raise RuntimeError(f'找不到{fp},Tex源文件缺失')
+            # e.g., \input{srcs/07_appendix}
+            assert os.path.exists(fp+'.tex'), f'即找不到{fp},也找不到{fp}.tex,Tex源文件缺失'
+            with open(fp+'.tex', 'r', encoding='utf-8', errors='replace') as fx:
+                c = fx.read()
         c = merge_tex_files_(project_foler, c, mode)
         main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
     return main_file
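Incidentally, the deleted helper has an apparent bug: inside the fallback loop it recomputes `base_name_s = os.path.basename(fp)` from the target path instead of from the candidate `f`, so the case-insensitive comparison never looks at the globbed files at all. A corrected sketch under that assumption:

```
import glob, os
pj = os.path.join

def find_tex_file_ignore_case(fp):
    dir_name, base_name = os.path.dirname(fp), os.path.basename(fp)
    if not base_name.endswith('.tex'): base_name += '.tex'
    if os.path.exists(pj(dir_name, base_name)): return pj(dir_name, base_name)
    for f in glob.glob(pj(dir_name, '*.tex')):   # case-insensitive fallback
        if os.path.basename(f).lower() == base_name.lower(): return f
    return None
```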

View File

@@ -0,0 +1,63 @@
+from toolbox import CatchException, update_ui
+from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+
+@CatchException
+def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    """
+    txt             text typed by the user in the input field, e.g. a paragraph to translate or a path containing files to process
+    llm_kwargs      GPT model parameters such as temperature and top_p, usually passed through as-is
+    plugin_kwargs   plugin parameters, usually passed through as-is
+    chatbot         handle of the chat display box, used to show output to the user
+    history         chat history (the context so far)
+    system_prompt   the silent system prompt given to GPT
+    web_port        the port this software is currently running on
+    """
+    history = []    # clear the history to avoid input overflow
+    chatbot.append(("这是什么功能?", "交互功能函数模板。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。"))
+    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+
+    state = chatbot._cookies.get('plugin_state_0001', None) # initialize the plugin state
+
+    if state is None:
+        chatbot._cookies['lock_plugin'] = 'crazy_functions.交互功能函数模板->交互功能模板函数' # lock the plugin callback path; the next user submission goes straight back to this function
+        chatbot._cookies['plugin_state_0001'] = 'wait_user_keyword' # set the plugin state
+
+        chatbot.append(("第一次调用:", "请输入关键词, 我将为您查找相关壁纸, 建议使用英文单词, 插件锁定中,请直接提交即可。"))
+        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+        return
+
+    if state == 'wait_user_keyword':
+        chatbot._cookies['lock_plugin'] = None          # release the plugin lock to avoid a deadlock if it were forgotten
+        chatbot._cookies['plugin_state_0001'] = None    # clear the plugin state to avoid a deadlock if it were forgotten
+
+        # the plugin lock is released
+        chatbot.append((f"获取关键词:{txt}", ""))
+        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+        page_return = get_image_page_by_keyword(txt)
+        inputs = inputs_show_user = f"Extract all image urls in this html page, pick the first 5 images and show them with markdown format: \n\n {page_return}"
+        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+            inputs=inputs, inputs_show_user=inputs_show_user,
+            llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
+            sys_prompt="When you want to show an image, use markdown format. e.g. ![image_description](image_url). If there are no image url provided, answer 'no image url provided'"
+        )
+        chatbot[-1] = [chatbot[-1][0], gpt_say]
+        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+        return
+
+# ---------------------------------------------------------------------------------
+
+def get_image_page_by_keyword(keyword):
+    import requests
+    from bs4 import BeautifulSoup
+    response = requests.get(f'https://wallhaven.cc/search?q={keyword}', timeout=2)
+    res = "image urls: \n"
+    for image_element in BeautifulSoup(response.content, 'html.parser').findAll("img"):
+        try:
+            res += image_element["data-src"]
+            res += "\n"
+        except:
+            pass
+    return res
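The new plugin demonstrates a two-turn protocol built on two cookies: `lock_plugin` routes the next user submission straight back to the function, and `plugin_state_0001` records where the conversation stands. Condensed to its skeleton (a sketch; `chatbot` stands in for the project's ChatBotWithCookies):

```
def two_turn_plugin(txt, chatbot):
    state = chatbot._cookies.get('plugin_state_0001')
    if state is None:                    # turn 1: lock and ask for input
        chatbot._cookies['lock_plugin'] = 'crazy_functions.交互功能函数模板->交互功能模板函数'
        chatbot._cookies['plugin_state_0001'] = 'wait_user_keyword'
        return "please enter a keyword"
    if state == 'wait_user_keyword':     # turn 2: unlock and consume input
        chatbot._cookies['lock_plugin'] = None
        chatbot._cookies['plugin_state_0001'] = None
        return f"got keyword: {txt}"
```

Clearing both cookies on the second turn matters: as the inline comments warn, a forgotten `lock_plugin` would trap every future submission inside this function.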

View File

@@ -14,19 +14,17 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
             doc = Document(fp)
             file_content = "\n".join([para.text for para in doc.paragraphs])
         else:
-            try:
-                import win32com.client
-                word = win32com.client.Dispatch("Word.Application")
-                word.visible = False
-                # open the file
-                doc = word.Documents.Open(os.getcwd() + '/' + fp)
-                # file_content = doc.Content.Text
-                doc = word.ActiveDocument
-                file_content = doc.Range().Text
-                doc.Close()
-                word.Quit()
-            except:
-                raise RuntimeError('请先将.doc文档转换为.docx文档。')
+            import win32com.client
+            word = win32com.client.Dispatch("Word.Application")
+            word.visible = False
+            # open the file
+            print('fp', os.getcwd())
+            doc = word.Documents.Open(os.getcwd() + '/' + fp)
+            # file_content = doc.Content.Text
+            doc = word.ActiveDocument
+            file_content = doc.Range().Text
+            doc.Close()
+            word.Quit()
         print(file_content)

     # file names under private_upload are prone to mojibake after unzipping (rar and 7z are fine), so only the article content is analyzed and file names are not passed in
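For context, the `else` branch only works on Windows with Microsoft Word installed, since it drives Word over COM to read legacy .doc files; the .docx branch above it uses python-docx and is portable. A minimal sketch of that portable path:

```
from docx import Document  # pip install python-docx

def read_docx_text(fp: str) -> str:
    # concatenate the plain text of every paragraph in the document
    doc = Document(fp)
    return "\n".join(para.text for para in doc.paragraphs)
```

The try/except removed here had turned any COM failure into a clear "convert .doc to .docx first" error; without it, a non-Windows host will surface a raw ImportError instead.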

View File

@@ -90,29 +90,62 @@

 At this point the application process is complete. The items you need to note down are:

-● the key (corresponds to AZURE_API_KEY; key 1 or key 2 both work)
-● the endpoint (corresponds to AZURE_ENDPOINT)
-● the deployment name (corresponds to AZURE_ENGINE; this is not the model name)
+● the key (key 1 or key 2 both work)
+● the endpoint
+● the deployment name (not the model name)

 # Modify config.py

 ```
-LLM_MODEL = "azure-gpt-3.5" # the default model at startup (you can of course also pick one from the dropdown later)
-AZURE_ENDPOINT = "填入终结点" # see the image above
+AZURE_ENDPOINT = "填入终结点"
 AZURE_API_KEY = "填入azure openai api的密钥"
 AZURE_API_VERSION = "2023-05-15" # the default 2023-05-15 version is used; no need to change
-AZURE_ENGINE = "填入部署名" # see the image above
+AZURE_ENGINE = "填入部署名"
 ```

+# Using the API
+How to actually call the API is covered in the official documentation: [Quickstart - Get started using ChatGPT and GPT-4 with Azure OpenAI Service | Microsoft Learn](https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/chatgpt-quickstart?pivots=programming-language-python)
+
+It is similar to calling openai's own API: the openai library must be installed in both cases, but the calling convention differs:
+```
+import openai
+openai.api_type = "azure" # fixed format, do not change
+openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT") # fill in the "endpoint" here
+openai.api_version = "2023-05-15" # fixed format, do not change
+openai.api_key = os.getenv("AZURE_OPENAI_KEY") # fill in "key 1" or "key 2" here
+
+response = openai.ChatCompletion.create(
+    engine="gpt-35-turbo", # this takes the deployment name, not the model name
+    messages=[
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "Does Azure OpenAI support customer managed keys?"},
+        {"role": "assistant", "content": "Yes, customer managed keys are supported by Azure OpenAI."},
+        {"role": "user", "content": "Do other Azure Cognitive Services support this too?"}
+    ]
+)
+
+print(response)
+print(response['choices'][0]['message']['content'])
+```
+Two points deserve attention:
+1.  engine takes the deployment name, not the model name.
+2.  unlike a response fetched with the requests library, the response obtained through the openai library needs no decoding: it is already parsed JSON, so read it directly by key.
+
+For more detailed usage, see the official API documentation.

 # About costs
-Azure OpenAI API still incurs some cost (the free subscription is only valid for one month)
+Azure OpenAI API still incurs some cost (the free subscription is only valid for one month); the fees are as follows:
+![image.png](https://note.youdao.com/yws/res/18095/WEBRESOURCEeba0ab6d3127b79e143ef2d5627c0e44)
 For details, see [Azure OpenAI Service - Pricing | Microsoft Azure](https://azure.microsoft.com/zh-cn/pricing/details/cognitive-services/openai-service/?cdn=disable)
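The bridge code elsewhere in this diff wires these same four config fields onto the openai module's globals. A condensed sketch of that mapping, assuming `toolbox.get_conf` can unpack several keys at once as it does elsewhere in the repo (see the bridge file's hunks below for the real code):

```
import openai
from toolbox import get_conf

AZURE_ENDPOINT, AZURE_API_KEY, AZURE_API_VERSION, AZURE_ENGINE = \
    get_conf('AZURE_ENDPOINT', 'AZURE_API_KEY', 'AZURE_API_VERSION', 'AZURE_ENGINE')

openai.api_type = "azure"              # fixed for Azure endpoints
openai.api_version = AZURE_API_VERSION
openai.api_base = AZURE_ENDPOINT
openai.api_key = AZURE_API_KEY
```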

main.py
View File

@@ -45,10 +45,10 @@ def main():
     proxy_info = check_proxy(proxies)

     gr_L1 = lambda: gr.Row().style()
-    gr_L2 = lambda scale: gr.Column(scale=scale)
+    gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id)
     if LAYOUT == "TOP-DOWN":
         gr_L1 = lambda: DummyWith()
-        gr_L2 = lambda scale: gr.Row()
+        gr_L2 = lambda scale, elem_id: gr.Row()
         CHATBOT_HEIGHT /= 2

     cancel_handles = []
@@ -56,12 +56,12 @@ def main():
         gr.HTML(title_html)
         cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
         with gr_L1():
-            with gr_L2(scale=2):
-                chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}")
-                chatbot.style(height=CHATBOT_HEIGHT)
+            with gr_L2(scale=2, elem_id="gpt-chat"):
+                chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}", elem_id="gpt-chatbot")
+                if LAYOUT == "TOP-DOWN": chatbot.style(height=CHATBOT_HEIGHT)
                 history = gr.State([])
-            with gr_L2(scale=1):
-                with gr.Accordion("输入区", open=True) as area_input_primary:
+            with gr_L2(scale=1, elem_id="gpt-panel"):
+                with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
                     with gr.Row():
                         txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
                     with gr.Row():
@@ -71,14 +71,14 @@ def main():
                         stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
                         clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm")
                     with gr.Row():
-                        status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
-                with gr.Accordion("基础功能区", open=True) as area_basic_fn:
+                        status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
+                with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
                     with gr.Row():
                         for k in functional:
                             if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
                             variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                             functional[k]["Button"] = gr.Button(k, variant=variant)
-                with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
+                with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
                     with gr.Row():
                         gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
                     with gr.Row():
@@ -100,7 +100,7 @@ def main():
                     with gr.Row():
                         with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
                             file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
-                with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN")):
+                with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN"), elem_id="interact-panel"):
                     system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
                     top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True, label="Top-p (nucleus sampling)",)
                     temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
@@ -109,7 +109,7 @@ def main():
                     md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
                     gr.Markdown(description)
-                with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
+                with gr.Accordion("备选输入区", open=True, visible=False, elem_id="input-panel2") as area_input_secondary:
                     with gr.Row():
                         txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
                     with gr.Row():
@@ -155,7 +155,7 @@ def main():
             click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
             cancel_handles.append(click_handle)
         # interaction between the file-upload area and the chatbot after files are received
-        file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2])
+        file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2])
         # function plugins - fixed button area
         for k in crazy_fns:
             if not crazy_fns[k].get("AsButton", True): continue
@@ -185,12 +185,7 @@ def main():
         # register the stop button's callbacks
         stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
         stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
-        def init_cookie(cookies, chatbot):
-            # give each visiting user a unique uuid
-            import uuid
-            cookies.update({'user-uuid': 'user-'+uuid.uuid4().hex})
-            return cookies
-        demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
+        demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{ChatBotHeight();}')

     # gradio's inbrowser trigger is not very reliable; roll back to the original browser-opening function
     def auto_opentab_delay():
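The elem_id attributes added throughout this file are not cosmetic: they give the `ChatBotHeight()` script injected by theme.py (see its hunk below) stable DOM anchors to measure, and `demo.load(..., _js=...)` starts that script once the page loads. A stripped-down sketch of the pattern, assuming the gradio 3.x-era API this repo uses:

```
import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(elem_id="gpt-chatbot")           # element the JS resizes
    with gr.Accordion("input", elem_id="input-panel"):    # one of the panels the JS measures
        txt = gr.Textbox(show_label=False)
    # kick off the injected ChatBotHeight() JS once the page has loaded
    demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{ChatBotHeight();}')
```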

View File

@@ -121,7 +121,7 @@ model_info = {
     },

     # azure openai
-    "azure-gpt-3.5":{
+    "azure-gpt35":{
         "fn_with_ui": azure_ui,
         "fn_without_ui": azure_noui,
         "endpoint": get_conf("AZURE_ENDPOINT"),

View File

@@ -14,8 +14,7 @@ import traceback
 import importlib
 import openai
 import time
-import requests
-import json

 # read the AZURE OPENAI API settings from config.py
 from toolbox import get_conf, update_ui, clip_history, trimmed_format_exc
@@ -44,6 +43,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         chatbot is the conversation list shown in the WebUI; modify it and yield it out to update the chat interface directly
         additional_fn indicates which button was clicked; see functional.py for the buttons
     """
+    print(llm_kwargs["llm_model"])

     if additional_fn is not None:
         import core_functional
@@ -57,6 +57,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     chatbot.append((inputs, ""))
     yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # refresh the UI

     payload = generate_azure_payload(inputs, llm_kwargs, history, system_prompt, stream)
     history.append(inputs); history.append("")
@@ -64,21 +65,19 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     retry = 0
     while True:
         try:
             openai.api_type = "azure"
             openai.api_version = AZURE_API_VERSION
             openai.api_base = AZURE_ENDPOINT
             openai.api_key = AZURE_API_KEY
             response = openai.ChatCompletion.create(timeout=TIMEOUT_SECONDS, **payload); break
-        except openai.error.AuthenticationError:
-            tb_str = '```\n' + trimmed_format_exc() + '```'
-            chatbot[-1] = [chatbot[-1][0], tb_str]
-            yield from update_ui(chatbot=chatbot, history=history, msg="openai返回错误") # refresh the UI
-            return
         except:
             retry += 1
-            traceback.print_exc()
-            retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
-            yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # refresh the UI
+            chatbot[-1] = ((chatbot[-1][0], "获取response失败,重试中。。。"))
             if retry > MAX_RETRY: raise TimeoutError
+            if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')

     gpt_replying_buffer = ""
     is_head_of_the_stream = True
@@ -142,18 +141,21 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     payload = generate_azure_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)

     retry = 0
     while True:
         try:
             openai.api_type = "azure"
             openai.api_version = AZURE_API_VERSION
             openai.api_base = AZURE_ENDPOINT
             openai.api_key = AZURE_API_KEY
             response = openai.ChatCompletion.create(timeout=TIMEOUT_SECONDS, **payload); break
         except:
             retry += 1
             traceback.print_exc()
             if retry > MAX_RETRY: raise TimeoutError
             if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')

     stream_response = response
     result = ''
     while True:
@@ -162,14 +164,19 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
             break
         except:
             chunk = next(stream_response) # failed; retry once? if that fails again, there is nothing more to be done.

-        if len(chunk)==0: continue
-        json_data = json.loads(str(chunk))['choices'][0]
-        delta = json_data["delta"]
-        if len(delta) == 0:
-            break
-        if "role" in delta:
-            continue
+        if len(chunk)==0: continue
+        if not chunk.startswith('data:'):
+            error_msg = get_full_error(chunk, stream_response)
+            if "reduce the length" in error_msg:
+                raise ConnectionAbortedError("AZURE OPENAI API拒绝了请求:" + error_msg)
+            else:
+                raise RuntimeError("AZURE OPENAI API拒绝了请求:" + error_msg)
+        if ('data: [DONE]' in chunk): break
+        delta = chunk["delta"]
+        if len(delta) == 0: break
+        if "role" in delta: continue
         if "content" in delta:
             result += delta["content"]
             if not console_slience: print(delta["content"], end='')
@@ -177,14 +184,11 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
             # observation window: show the data received so far
             if len(observe_window) >= 1: observe_window[0] += delta["content"]
             # watchdog: terminate if the dog has not been fed within the deadline
-            if len(observe_window) >= 2000:
+            if len(observe_window) >= 2:
                 if (time.time()-observe_window[1]) > watch_dog_patience:
                     raise RuntimeError("用户取消了程序。")
-        else:
-            raise RuntimeError("意外Json结构:"+delta)
-        if json_data['finish_reason'] == 'content_filter':
-            raise RuntimeError("由于提问含不合规内容被Azure过滤。")
-        if json_data['finish_reason'] == 'length':
+        else: raise RuntimeError("意外Json结构:"+delta)
+        if chunk['finish_reason'] == 'length':
             raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
     return result
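The new stream loop indexes `chunk["delta"]` directly, whereas the old one first parsed `json.loads(str(chunk))['choices'][0]`. For reference, a sketch of the generic delta-accumulation pattern for openai-style streaming responses (variable names here are illustrative, not the repo's):

```
result = ""
for chunk in response:                      # each chunk is one streamed event
    choice = chunk["choices"][0]
    delta = choice["delta"]                 # incremental message fragment
    if "content" in delta:
        result += delta["content"]          # append the new text
    if choice.get("finish_reason") == "length":
        raise ConnectionAbortedError("output truncated: token limit reached")
```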

View File

@@ -1,6 +1,6 @@
 import gradio as gr
 from toolbox import get_conf
-CODE_HIGHLIGHT, ADD_WAIFU = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU')
+CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')
 # list of colors available in gradio
 # gr.themes.utils.colors.slate (slate)
 # gr.themes.utils.colors.gray (gray)
@@ -82,20 +82,73 @@ def adjust_theme():
             button_cancel_text_color_dark="white",
         )

+        # Layout = "LEFT-RIGHT"
+        js = """
+        <script>
+            function ChatBotHeight() {
+                function update_height(){
+                    var { panel_height_target, chatbot_height, chatbot } = get_elements();
+                    if (panel_height_target!=chatbot_height)
+                    {
+                        var pixelString = panel_height_target.toString() + 'px';
+                        chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
+                    }
+                }
+                function update_height_slow(){
+                    var { panel_height_target, chatbot_height, chatbot } = get_elements();
+                    if (panel_height_target!=chatbot_height)
+                    {
+                        new_panel_height = (panel_height_target - chatbot_height)*0.5 + chatbot_height;
+                        if (Math.abs(new_panel_height - panel_height_target) < 10){
+                            new_panel_height = panel_height_target;
+                        }
+                        console.log(chatbot_height, panel_height_target, new_panel_height);
+                        var pixelString = new_panel_height.toString() + 'px';
+                        chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
+                    }
+                }
+                update_height();
+                setInterval(function() {
+                    update_height_slow()
+                }, 50); // run once every 100 milliseconds
+            }
+            function get_elements() {
+                const chatbot = document.querySelector('#gpt-chatbot > div.wrap.svelte-18telvq');
+                const panel1 = document.querySelector('#input-panel');
+                const panel2 = document.querySelector('#basic-panel');
+                const panel3 = document.querySelector('#plugin-panel');
+                const panel4 = document.querySelector('#interact-panel');
+                const panel5 = document.querySelector('#input-panel2');
+                const panel_active = document.querySelector('#state-panel');
+                var panel_height_target = (20-panel_active.offsetHeight) + panel1.offsetHeight + panel2.offsetHeight + panel3.offsetHeight + panel4.offsetHeight + panel5.offsetHeight + 21;
+                var panel_height_target = parseInt(panel_height_target);
+                var chatbot_height = chatbot.style.height;
+                var chatbot_height = parseInt(chatbot_height);
+                return { panel_height_target, chatbot_height, chatbot };
+            }
+        </script>
+        """
+        if LAYOUT=="TOP-DOWN":
+            js = ""
+
         # add a cute Live2D mascot
         if ADD_WAIFU:
-            js = """
+            js += """
             <script src="file=docs/waifu_plugin/jquery.min.js"></script>
             <script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
             <script src="file=docs/waifu_plugin/autoload.js"></script>
             """
             gradio_original_template_fn = gr.routes.templates.TemplateResponse
             def gradio_new_template_fn(*args, **kwargs):
                 res = gradio_original_template_fn(*args, **kwargs)
                 res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
                 res.init_headers()
                 return res
             gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
     except:
         set_theme = None
         print('gradio版本较旧, 不能自定义字体和颜色')

View File

@@ -4,6 +4,7 @@ import time
 import inspect
 import re
 import os
+import gradio
 from latex2mathml.converter import convert as tex2mathml
 from functools import wraps, lru_cache
 pj = os.path.join
@@ -35,17 +36,19 @@ class ChatBotWithCookies(list):
     def get_cookies(self):
         return self._cookies

+black_list = ['127.0.0.1']

 def ArgsGeneralWrapper(f):
     """
     A decorator that reorganizes the input parameters, changing their order and structure.
     """
-    def decorated(cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args):
+    def decorated(request: gradio.Request, cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args):
         txt_passon = txt
         if txt == "" and txt2 != "": txt_passon = txt2
         # introduce a chatbot that carries cookies
         cookies.update({
             'top_p':top_p,
+            'llm_model': llm_model,
             'temperature':temperature,
         })
         llm_kwargs = {
@@ -54,13 +57,25 @@ def ArgsGeneralWrapper(f):
             'top_p':top_p,
             'max_length': max_length,
             'temperature':temperature,
+            'client_ip': request.client.host,
         }
         plugin_kwargs = {
             "advanced_arg": plugin_advanced_arg,
         }
         chatbot_with_cookie = ChatBotWithCookies(cookies)
         chatbot_with_cookie.write_list(chatbot)
-        yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
+        if llm_kwargs['client_ip'] in black_list:
+            chatbot_with_cookie.append(['IP已封禁, 当前IP黑名单:' + str(black_list), 'IP已封禁:' + llm_kwargs['client_ip']])
+            yield from update_ui(chatbot_with_cookie, history, msg='IP已封禁')
+            return # done
+        if cookies.get('lock_plugin', None) is None:
+            # the normal state
+            yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
+        else:
+            # handle the locked state of certain special plugins
+            module, fn_name = cookies['lock_plugin'].split('->')
+            f_hot_reload = getattr(importlib.import_module(module, fn_name), fn_name)
+            yield from f_hot_reload(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
     return decorated
@@ -68,8 +83,22 @@ def update_ui(chatbot, history, msg='正常', **kwargs):  # refresh the UI
     """
     Refresh the user interface
     """
-    assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时,可用clear将其清空,然后用for+append循环重新赋值。"
-    yield chatbot.get_cookies(), chatbot, history, msg
+    assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时, 可用clear将其清空, 然后用for+append循环重新赋值。"
+    cookies = chatbot.get_cookies()
+
+    # fix the display problem while a plugin is locked
+    if cookies.get('lock_plugin', None):
+        label = cookies.get('llm_model', "") + " | " + "正在锁定插件" + cookies.get('lock_plugin', None)
+        chatbot_gr = gradio.update(value=chatbot, label=label)
+        if cookies.get('label', "") != label: cookies['label'] = label # remember the current label
+    elif cookies.get('label', None):
+        chatbot_gr = gradio.update(value=chatbot, label=cookies.get('llm_model', ""))
+        cookies['label'] = None # clear the label
+    else:
+        chatbot_gr = chatbot
+
+    yield cookies, chatbot_gr, history, msg

 def update_ui_lastest_msg(lastmsg, chatbot, history, delay=1):  # refresh the UI
     """
@@ -452,7 +481,7 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
     else: current = []
     chatbot._cookies.update({'file_to_promote': [new_path] + current})

-def on_file_uploaded(files, chatbot, txt, txt2, checkboxes, cookies):
+def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
     """
     Callback invoked when files are uploaded
     """
@@ -463,23 +492,24 @@ def on_file_uploaded(files, chatbot, txt, txt2, checkboxes, cookies):
     import time
     import glob
     from toolbox import extract_archive
-    user_uuid = cookies.get('user-uuid', 'unknown_user')
-    try: shutil.rmtree(f'./private_upload/{user_uuid}')
-    except: pass
+    try:
+        shutil.rmtree('./private_upload/')
+    except:
+        pass
     time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
-    os.makedirs(f'private_upload/{user_uuid}/{time_tag}', exist_ok=True)
+    os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
     err_msg = ''
     for file in files:
         file_origin_name = os.path.basename(file.orig_name)
-        shutil.copy(file.name, f'private_upload/{user_uuid}/{time_tag}/{file_origin_name}')
-        err_msg += extract_archive(f'private_upload/{user_uuid}/{time_tag}/{file_origin_name}',
-                                   dest_dir=f'private_upload/{user_uuid}/{time_tag}/{file_origin_name}.extract')
-    moved_files = [fp for fp in glob.glob(f'private_upload/{user_uuid}/**/*', recursive=True)]
+        shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
+        err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
+                                   dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract')
+    moved_files = [fp for fp in glob.glob('private_upload/**/*', recursive=True)]
     if "底部输入区" in checkboxes:
         txt = ""
-        txt2 = f'private_upload/{user_uuid}/{time_tag}'
+        txt2 = f'private_upload/{time_tag}'
     else:
-        txt = f'private_upload/{user_uuid}/{time_tag}'
+        txt = f'private_upload/{time_tag}'
         txt2 = ""
     moved_files_str = '\t\n\n'.join(moved_files)
     chatbot.append(['我上传了文件,请查收',
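The lock dispatch added to ArgsGeneralWrapper stores the target as a single "module.path->function_name" string in the `lock_plugin` cookie and resolves it back to a callable at dispatch time. A condensed sketch of that resolution (note the repo passes `fn_name` as `importlib.import_module`'s second argument, which is the `package` parameter; the simpler form below assumes that argument is unnecessary):

```
import importlib

def resolve_locked_plugin(lock: str):
    # "crazy_functions.交互功能函数模板->交互功能模板函数"  ->  callable
    module_path, fn_name = lock.split('->')
    return getattr(importlib.import_module(module_path), fn_name)
```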

View File

@@ -1,5 +1,5 @@
 {
-    "version": 3.43,
+    "version": 3.42,
     "show_feature": true,
-    "new_feature": "修复Azure接口的BUG <-> 完善多语言模块 <-> 完善本地Latex矫错和翻译功能 <-> 增加gpt-3.5-16k的支持 <-> 新增最强Arxiv论文翻译插件 <-> 修复gradio复制按钮BUG <-> 修复PDF翻译的BUG, 新增HTML中英双栏对照 <-> 添加了OpenAI图片生成插件"
+    "new_feature": "完善本地Latex矫错和翻译功能 <-> 增加gpt-3.5-16k的支持 <-> 新增最强Arxiv论文翻译插件 <-> 修复gradio复制按钮BUG <-> 修复PDF翻译的BUG, 新增HTML中英双栏对照 <-> 添加了OpenAI图片生成插件 <-> 添加了OpenAI音频转文本总结插件 <-> 通过Slack添加对Claude的支持"
 }