Merge branch 'ui_improvement' into wps_i18n
@@ -48,6 +48,9 @@ DEFAULT_WORKER_NUM = 3
 # Height of the chat window
 CHATBOT_HEIGHT = 1115

+# Theme
+THEME = "Default"
+
 # Code highlighting
 CODE_HIGHLIGHT = True

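Note on the new THEME option: a minimal sketch of how a THEME string like the one added above could be mapped onto a gradio theme object. The THEMES table and set_theme helper are illustrative only (they are not code from this repository) and assume a gradio version that ships gr.themes.

    import gradio as gr

    # Hypothetical mapping from the config string to a gradio theme constructor.
    THEMES = {
        "Default": gr.themes.Default,
        "Soft": gr.themes.Soft,
        "Base": gr.themes.Base,
    }

    def set_theme(name):
        # Fall back to the default theme when the configured name is unknown.
        return THEMES.get(name, gr.themes.Default)()

    demo = gr.Blocks(theme=set_theme("Default"))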
@@ -80,6 +83,9 @@ PROXY_API_URL = '' # your gateway application
 # Number of parallel gradio threads (no need to modify)
 CONCURRENT_COUNT = 100

+# Whether to automatically clear the input box on submit
+AUTO_CLEAR_TXT = False
+
 # Add a live2d decoration
 ADD_WAIFU = False

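Since AUTO_CLEAR_TXT is just another module-level constant, it can be overridden locally like the other options via the config_private.py mechanism mentioned later in this diff. A hypothetical config_private.py (kept out of version control) might look like:

    # config_private.py -- hypothetical local override, never committed to git
    AUTO_CLEAR_TXT = True    # clear the input boxes automatically after each submit
    ADD_WAIFU = True         # enable the live2d decoration
    CHATBOT_HEIGHT = 1115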
@@ -195,7 +195,7 @@ def test_Latex():
 # txt = r"https://arxiv.org/abs/2303.08774"
 # txt = r"https://arxiv.org/abs/2303.12712"
 # txt = r"C:\Users\fuqingxu\arxiv_cache\2303.12712\workfolder"
-txt = r"C:\Users\fuqingxu\Desktop\9"
+txt = r"2306.17157" # this paper has an \input command with wrong filename casing!


 for cookies, cb, hist, msg in (Latex翻译中文并重新编译PDF)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
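The test above drives the plugin as a generator and unpacks every yielded update as (cookies, cb, hist, msg). A toy stand-in plugin, written only to illustrate that loop protocol (it is not the real Latex翻译中文并重新编译PDF), can be consumed the same way:

    def toy_plugin(txt, chatbot, history):
        # Yield an intermediate UI state first, then the final state.
        chatbot.append([txt, "working ..."])
        yield {}, chatbot, history, "processing"
        chatbot[-1] = (txt, "done")
        history.extend([txt, "done"])
        yield {}, chatbot, history, "finished"

    chatbot, history = [], []
    for cookies, cb, hist, msg in toy_plugin("2306.17157", chatbot, history):
        print(msg, cb[-1])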
@@ -203,6 +203,7 @@ def merge_tex_files_(project_foler, main_file, mode):
 c = fx.read()
 else:
 # e.g., \input{srcs/07_appendix}
+assert os.path.exists(fp+'.tex'), f'即找不到{fp},也找不到{fp}.tex,Tex源文件缺失!'
 with open(fp+'.tex', 'r', encoding='utf-8', errors='replace') as fx:
 c = fx.read()
 c = merge_tex_files_(project_foler, c, mode)
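The added assert fails early with a readable message when an \input target exists neither as a bare file nor with a .tex suffix. A self-contained sketch of that lookup (read_input_target is a hypothetical helper, not the project's function):

    import os

    def read_input_target(project_folder, name):
        # e.g. \input{srcs/07_appendix} may refer to "srcs/07_appendix" or "srcs/07_appendix.tex"
        fp = os.path.join(project_folder, name)
        if os.path.exists(fp):
            path = fp
        else:
            assert os.path.exists(fp + '.tex'), \
                f"neither {fp} nor {fp}.tex exists; a TeX source file is missing"
            path = fp + '.tex'
        with open(path, 'r', encoding='utf-8', errors='replace') as fx:
            return fx.read()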
@@ -27,7 +27,12 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"):
 }
 response = requests.post(url, headers=headers, json=data, proxies=proxies)
 print(response.content)
+
+try:
 image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
+except:
+raise RuntimeError(response.content.decode())
+
 # Save the file locally
 r = requests.get(image_url, proxies=proxies)
 file_path = 'gpt_log/image_gen/'
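The try/except added above turns an unexpected API response (for example an error JSON or an HTML error page) into a RuntimeError carrying the raw body, instead of an opaque KeyError or JSONDecodeError. A minimal sketch of the same idea, with a hypothetical extract_image_url helper:

    import json

    def extract_image_url(raw_body):
        try:
            return json.loads(raw_body.decode('utf8'))['data'][0]['url']
        except Exception:
            # Surface whatever the server actually sent, e.g. {"error": {...}}.
            raise RuntimeError(raw_body.decode())

    print(extract_image_url(b'{"data": [{"url": "https://example.com/img.png"}]}'))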
@@ -1,4 +1,3 @@
-#! .\venv\
 # encoding: utf-8
 # @Time : 2023/4/19
 # @Author : Spike
@@ -14,11 +13,13 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
 show_say = txt
 prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。'
 else:
+
 show_say = '分析上述回答,再列出用户可能提出的三个问题。'
 try:
 prompt = history[-1]+f"\n{show_say}"
 except IndexError:
 prompt = system_prompt+"\n再列出用户可能提出的三个问题。"
+
 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
 inputs=prompt,
 inputs_show_user=show_say,
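The try/except IndexError shown in this hunk falls back to the system prompt when there is no previous answer in history. A compact, standalone version of that fallback (build_prompt is illustrative only):

    def build_prompt(history, system_prompt, show_say):
        try:
            # Ask the follow-up question against the latest answer, if any.
            return history[-1] + f"\n{show_say}"
        except IndexError:
            # Empty history: fall back to the system prompt instead.
            return system_prompt + "\n再列出用户可能提出的三个问题。"

    print(build_prompt([], "You are a helpful assistant.", "分析上述回答,再列出用户可能提出的三个问题。"))
    print(build_prompt(["previous answer"], "sys", "分析上述回答,再列出用户可能提出的三个问题。"))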
@@ -27,5 +28,8 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
 history=history,
 sys_prompt=system_prompt
 )
-chatbot.append([show_say, gpt_say])
+chatbot[-1] = (show_say, gpt_say)
 history.extend([show_say, gpt_say])
+yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+
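Replacing chatbot.append(...) with chatbot[-1] = (...) avoids a duplicated row, assuming the request helper has already appended a placeholder row for the pending answer (the usual pattern around request_gpt_model_in_new_thread_with_ui_alive; treat that as an assumption here). A toy illustration:

    chatbot = []
    show_say, gpt_say = "分析上述回答,再列出用户可能提出的三个问题。", "(model answer)"

    chatbot.append([show_say, "..."])   # placeholder row added while the request is running
    chatbot[-1] = (show_say, gpt_say)   # overwrite it with the final answer instead of appending
    print(chatbot)                      # one row, not two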
main.py
@@ -6,8 +6,8 @@ def main():
 from request_llm.bridge_all import predict
 from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
 # It is recommended to make a copy named config_private.py for your secrets, such as API keys and proxy URLs, so they are not accidentally pushed to GitHub for others to see
-proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \
-get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS')
+proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = \
+get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')

 # If WEB_PORT is -1, pick a random web port
 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
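Adding 'AUTO_CLEAR_TXT' to the get_conf(...) call only works because the left-hand side gains a matching variable: the hunk shows get_conf returning one value per requested key. A minimal, illustrative re-implementation of that lookup, assuming the usual config.py / config_private.py split and that config.py is importable (the real toolbox.get_conf may differ):

    import importlib

    def read_single_conf(name):
        # Prefer the uncommitted config_private.py, fall back to config.py.
        try:
            return getattr(importlib.import_module("config_private"), name)
        except (ModuleNotFoundError, AttributeError):
            return getattr(importlib.import_module("config"), name)

    def get_conf(*names):
        return [read_single_conf(n) for n in names]

    # One variable per key, exactly as in the hunk above.
    CHATBOT_HEIGHT, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'AUTO_CLEAR_TXT')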
@@ -144,6 +144,11 @@ def main():
 resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
 clearBtn.click(lambda: ("",""), None, [txt, txt2])
 clearBtn2.click(lambda: ("",""), None, [txt, txt2])
+if AUTO_CLEAR_TXT:
+    submitBtn.click(lambda: ("",""), None, [txt, txt2])
+    submitBtn2.click(lambda: ("",""), None, [txt, txt2])
+    txt.submit(lambda: ("",""), None, [txt, txt2])
+    txt2.submit(lambda: ("",""), None, [txt, txt2])
 # Register callbacks for the basic function area
 for k in functional:
 if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
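A self-contained sketch (not the project's main.py) of the clear-on-submit wiring added above: gradio allows several callbacks on the same event, so the extra .click/.submit handlers can empty the textbox alongside the normal submit handler. The component names and the echo handler below are illustrative, and a gradio 3.x-style Chatbot is assumed.

    import gradio as gr

    AUTO_CLEAR_TXT = True   # would normally come from get_conf('AUTO_CLEAR_TXT')

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        txt = gr.Textbox(label="input")
        submitBtn = gr.Button("Submit")

        def respond(message, chat_history):
            chat_history.append((message, f"echo: {message}"))
            return chat_history

        submitBtn.click(respond, [txt, chatbot], [chatbot])
        txt.submit(respond, [txt, chatbot], [chatbot])
        if AUTO_CLEAR_TXT:
            # Extra handlers on the same events just clear the input box.
            submitBtn.click(lambda: "", None, [txt])
            txt.submit(lambda: "", None, [txt])

    demo.launch()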