Compare commits


18 Commits

Author SHA1 Message Date
6df33c95d4 Merge pull request #841 from KelvinF97/master
Optimize some code and fix some bugs
2023-07-01 22:31:28 +08:00
59877dd728 Fix "local variable 'result' might be referenced before assignment" by adding an else branch that assigns result (sketched below the commit list) 2023-07-01 22:27:11 +08:00
a4f187b8dc Merge branch 'master' into master 2023-07-01 22:19:53 +08:00
2da36c7667 Optimize the code: requests.exceptions.ConnectionError should be caught around the POST request itself, not while reading from the iterator; if the POST fails, execution never reaches the iterator step (sketched below the commit list). 2023-06-06 16:10:54 +08:00
0e1de5a184 Optimize the code for readability, catch other exceptions, and avoid errors from reading a response that has no content 2023-06-06 10:57:52 +08:00
344579fa79 Avoid a mutable default argument: when the list parameter is not passed explicitly, every call reuses the same default list object, so state accumulates across calls (sketched below the commit list). 2023-06-06 10:31:28 +08:00
6d7ee17dbd Add zh_langchain to the dependency files 2023-06-06 09:37:04 +08:00
0a83ba91e9 Add langchain; for safety, avoid building and installing packages from unknown sources: replace docs/gradio-3.32.2-py3-none-any.whl with gradio>=3.33.1 2023-06-06 09:22:50 +08:00
ffd7363c4c For more detailed and comprehensive exception information, it is usually recommended to use the exception object e; stack-trace information supplements it with the context and call chain of the exception (sketched below the commit list) 2023-06-06 09:13:13 +08:00
b538d31b13 str is a built-in type and should not be shadowed by using it as a variable name (sketched below the commit list) 2023-06-06 09:07:53 +08:00
543a8b98e9 Fix "local variable 'result' might be referenced before assignment" by adding an else branch that assigns result 2023-06-06 08:41:55 +08:00
55c6e9c59a Specify the proxy input type and use the dict get method to read dictionary data, avoiding exceptions; raise the timeout to 30 seconds to tolerate network fluctuations; capture exception details and show them on the front end for easier troubleshooting (see the check_proxy diff below) 2023-06-06 08:37:37 +08:00
0fc8f740d0 Fix PEP 8: E302 expected 2 blank lines, found 1 & PEP 8: E303 too many blank lines (4) 2023-06-06 08:29:26 +08:00
a019a64e65 PEP 8: E302 expected 2 blank lines, found 1 2023-06-06 08:26:54 +08:00
a75ae327e7 Make it comply with the PEP8 standard and improve PEP 8: E401 multiple imports on one line and PEP 8: E701 multiple statements on one line (colon) 2023-06-06 08:23:56 +08:00
3c38fad4aa PEP 8: E251 unexpected spaces around keyword / parameter equals 2023-06-06 08:14:21 +08:00
bf9731e937 Fix the issue of PEP 8: E401 multiple imports on one line 2023-06-06 08:07:50 +08:00
0f6e3e2dbb Fix the issue of ineffective transfer of reference history 2023-06-06 08:06:46 +08:00
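
The "result might be referenced before assignment" fixes (59877dd728, 543a8b98e9) address a common Python pitfall. A minimal sketch of the pattern with illustrative names (this is not the fork's actual code):

```python
def classify_status(code: int) -> str:
    if code < 400:
        result = "ok"
    elif code < 500:
        result = "client error"
    else:
        # Before the fix there was no else branch: a code matching neither
        # condition left `result` unassigned, so the return below raised
        # UnboundLocalError ("referenced before assignment").
        result = "server error"
    return result
```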
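Commit 2da36c7667 moves the except for requests.exceptions.ConnectionError onto the POST call itself: a failed request never reaches the response iterator, so catching the error there was dead code. A hedged sketch, where the URL, payload, and function name are placeholders rather than values from the diff:

```python
import requests

def stream_post(url, payload, proxies=None):
    try:
        # A refused or unreachable connection surfaces here, at the POST;
        # when the request fails, execution never reaches the loop below.
        response = requests.post(url, json=payload, proxies=proxies,
                                 stream=True, timeout=30)
    except requests.exceptions.ConnectionError as e:
        return f"[network error: {e}]"
    # Only reached when the POST itself succeeded.
    return b"".join(response.iter_content(chunk_size=1024))
```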
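Commit 344579fa79 removes a mutable default argument. Python evaluates default values once, at function definition, so a list default is shared by every call that omits the argument and items accumulate across unrelated calls. The standard fix, sketched with illustrative names:

```python
def append_record(item, records=None):
    # A `records=[]` default would be created once and shared between calls,
    # producing the cumulative effect the commit message describes.
    if records is None:
        records = []
    records.append(item)
    return records

assert append_record("a") == ["a"]
assert append_record("b") == ["b"]  # no leakage from the previous call
```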
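Commit ffd7363c4c argues for logging the exception object e together with the stack trace. A minimal sketch of that pattern (the failing function is a stand-in):

```python
import traceback

def risky():
    raise ValueError("demo failure")

try:
    risky()
except Exception as e:
    # `e` carries the message; the formatted traceback supplements it with
    # the context and call chain where the exception occurred.
    print(f"exception: {e}")
    print(traceback.format_exc())
```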
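Commit b538d31b13 renames a variable that shadowed the built-in str. The assignment itself does not error, but it breaks later calls to the type in the same scope, which is why shadowing built-ins is avoided; a tiny demonstration:

```python
str = "hello"      # legal, but now `str` names this string
try:
    str(42)        # TypeError: 'str' object is not callable
except TypeError as e:
    print(e)
del str            # deleting the binding restores the built-in
print(str(42))     # works again -> "42"
```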
34 changed files with 532 additions and 3425 deletions

BIN
.DS_Store vendored

Binary file not shown.

44
.gitignore vendored
View File

@ -2,14 +2,15 @@
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
plugins/
downloads/
eggs/
.eggs/
@ -25,6 +26,7 @@ share/python-wheels/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
@ -33,6 +35,7 @@ MANIFEST
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
@ -46,64 +49,91 @@ coverage.xml
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
github
.github
TEMP
TRASH
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
site/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.direnv/
.env
.venv
env/
venv*/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
.vscode
.idea
history
ssr_conf
config_private.py
@ -115,12 +145,8 @@ cradle*
debug*
private*
crazy_functions/test_project/pdf_and_word
crazy_fun
ctions/test_samples
crazy_functions/test_samples
request_llm/jittorllms
users_data/*
request_llm/moss
multi-language
request_llm/moss
media
__test.py

View File

@ -1,486 +0,0 @@
import os
import gradio as gr
from request_llm.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_user_upload, \
get_conf, ArgsGeneralWrapper, DummyWith
# 问询记录, python 版本建议3.9+(越新越好)
import logging
# 一些普通功能模块
from core_functional import get_core_functions
functional = get_core_functions()
# 高级函数插件
from crazy_functional import get_crazy_functions
crazy_fns = get_crazy_functions()
# 处理markdown文本格式的转变
gr.Chatbot.postprocess = format_io
# 做一些外观色彩上的调整
from theme import adjust_theme, advanced_css, custom_css
set_theme = adjust_theme()
# 代理与自动更新
from check_proxy import check_proxy, auto_update, warm_up_modules
import func_box
from check_proxy import get_current_version
os.makedirs("gpt_log", exist_ok=True)
try:
logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
except:
logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, LAYOUT, API_KEY, AVAIL_LLM_MODELS, LOCAL_PORT= \
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'LAYOUT',
'API_KEY', 'AVAIL_LLM_MODELS', 'LOCAL_PORT')
proxy_info = check_proxy(proxies)
# 如果WEB_PORT是-1, 则随机选取WEB端口
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
if not AUTHENTICATION: AUTHENTICATION = None
os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
class ChatBotFrame:
def __init__(self):
self.cancel_handles = []
self.initial_prompt = "You will play a professional to answer me according to my needs."
self.title_html = f"<h1 align=\"center\">Chatbot for KSO {get_current_version()}</h1>"
self.description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
class ChatBot(ChatBotFrame):
def __init__(self):
super().__init__()
self.__url = f'http://{func_box.ipaddr()}:{PORT}'
# self.__gr_url = gr.State(self.__url)
def draw_title(self):
# self.title = gr.HTML(self.title_html)
self.cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL, 'local': self.__url})
def draw_chatbot(self):
self.chatbot = gr.Chatbot(elem_id='main_chatbot', label=f"当前模型:{LLM_MODEL}")
self.chatbot.style()
self.history = gr.State([])
temp_draw = [gr.HTML() for i in range(7)]
with gr.Box(elem_id='chat_box'):
self.state_users = gr.HTML(value='', visible=False, elem_id='state_users')
with gr.Row():
self.sm_upload = gr.UploadButton(label='UPLOAD', file_count='multiple', elem_classes='sm_btn').style(size='sm', full_width=False)
self.sm_code_block = gr.Button(value='CODE', elem_classes='sm_btn').style(size='sm', full_width=False)
self.sm_upload_history = gr.Button("SPASE", variant="primary", elem_classes='sm_btn').style(size='sm', full_width=False)
self.md_dropdown = gr.Dropdown(choices=AVAIL_LLM_MODELS, value=LLM_MODEL,
show_label=False, interactive=True,
elem_classes='sm_select', elem_id='change-font-size').style(container=False)
gr.HTML(func_box.get_html("appearance_switcher.html").format(label=""), elem_id='user_input_tb', elem_classes="insert_block")
with gr.Row():
self.txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_classes='chat_input').style(container=False)
self.input_copy = gr.State('')
self.submitBtn = gr.Button("", variant="primary", elem_classes='submit_btn').style(full_width=False)
with gr.Row():
self.status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行\n {proxy_info}", elem_id='debug_mes')
def signals_sm_btn(self):
self.sm_upload.upload(on_file_uploaded, [self.sm_upload, self.chatbot, self.txt], [self.chatbot, self.txt]).then(
fn=lambda: [gr.Tabs.update(selected='plug_tab'), gr.Column.update(visible=False)], inputs=None, outputs=[self.tabs_inputs, self.examples_column]
)
self.sm_code_block.click(fn=lambda x: x+'```\n\n```', inputs=[self.txt], outputs=[self.txt])
self.sm_upload_history.click(get_user_upload, [self.chatbot], outputs=[self.chatbot]).then(fn=lambda: gr.Column.update(visible=False), inputs=None, outputs=self.examples_column)
# self.sm_select_font.select(fn=lambda x: gr.HTML.update(value=f"{x}px"), inputs=[self.sm_select_font], outputs=[self.state_users])
def draw_examples(self):
with gr.Column(elem_id='examples_col') as self.examples_column:
gr.Markdown('# Get Started Quickly')
with gr.Row():
hide_components = gr.Textbox(visible=False)
gr.Button.update = func_box.update_btn
self.example = [['今天伦敦天气怎么样?', '对2021年以后的世界和事件了解有限', self.submitBtn.update(elem_id='highlight_update')],
['今夕何夕,明月何月?', '偶尔会产生不正确的信息', self.submitBtn.update(elem_id='highlight_update')],
['怎么才能把学校给炸了?', '经过训练,会拒绝不适当的请求', self.submitBtn.update(elem_id='highlight_update')]]
self.example_inputs = [self.txt, hide_components, self.submitBtn]
self.guidance_example = gr.Examples(examples=self.example, inputs=self.example_inputs, label='基础对话')
self.guidance_plugins = gr.Dataset(components=[gr.HTML(visible=False)], samples=[['...'] for i in range(4)], label='高级功能', type='index')
self.guidance_plugins_state = gr.State()
self.guidance_news = gr.Examples(examples=func_box.git_log_list(), inputs=[hide_components, hide_components], label='News')
def plug_update(index, date_set):
variant = crazy_fns[date_set[index]]["Color"] if "Color" in crazy_fns[date_set[index]] else "secondary"
ret = {self.switchy_bt: self.switchy_bt.update(value=date_set[index], variant=variant, elem_id='highlight_update'),
self.tabs_inputs: gr.Tabs.update(selected='plug_tab'),
self.area_crazy_fn: self.area_crazy_fn.update(open=True)}
fns_value = func_box.txt_converter_json(str(crazy_fns[date_set[index]].get('Parameters', '')))
fns_lable = f"插件[{date_set[index]}]的高级参数说明:\n" + crazy_fns[date_set[index]].get("ArgsReminder", f"没有提供高级参数功能说明")
temp_dict = dict(visible=True, interactive=True, value=str(fns_value), label=fns_lable)
# 是否唤起高级插件参数区
if crazy_fns[date_set[index]].get("AdvancedArgs", False):
ret.update({self.plugin_advanced_arg: gr.update(**temp_dict)})
ret.update({self.area_crazy_fn: self.area_crazy_fn.update(open=False)})
else:
ret.update({self.plugin_advanced_arg: gr.update(visible=False, label=f"插件[{date_set[index]}]不需要高级参数。")})
return ret
self.guidance_plugins.select(fn=plug_update, inputs=[self.guidance_plugins, self.guidance_plugins_state],
outputs=[self.switchy_bt, self.plugin_advanced_arg, self.tabs_inputs,
self.area_crazy_fn])
def __clear_input(self, inputs):
return '', inputs, self.examples_column.update(visible=False)
def draw_prompt(self):
with gr.Row():
self.pro_search_txt = gr.Textbox(show_label=False, placeholder="Enter the prompt you want.").style(
container=False)
self.pro_entry_btn = gr.Button("搜索", variant="primary").style(full_width=False, size="sm")
with gr.Row():
with gr.Accordion(label='Prompt usage frequency'):
self.pro_prompt_list = gr.Dataset(components=[gr.HTML(visible=False)], samples_per_page=10,
label='Results',
samples=[[". . ."] for i in range(20)], type='index')
self.pro_prompt_state = gr.State(self.pro_prompt_list)
def draw_temp_edit(self):
with gr.Box():
with gr.Row():
with gr.Column(scale=100):
self.pro_results = gr.Chatbot(label='Prompt and result', elem_id='prompt_result').style()
with gr.Column(scale=16):
Tips = "用 BORF 分析法设计chat GPT prompt:\n" \
"1、阐述背景 B(Background): 说明背景为chatGPT提供充足的信息\n" \
"2、定义目标 O(Objectives):“我们希望实现什么”\n" \
"3、定义关键结果 R(key Result):“我要什么具体效果”\n" \
"4、试验并调整改进 E(Evolve):三种改进方法自由组合\n" \
"\t 改进输入从答案的不足之处着手改进背景B,目标O与关键结果R\n" \
"\t 改进答案在后续对话中指正chatGPT答案缺点\n" \
"\t 重新生成尝试在prompt不变的情况下多次生成结果优中选优\n" \
"\t 熟练使用占位符{{{v}}}: 当Prompt存在占位符则优先将{{{v}}}替换为预期文本"
self.pro_edit_txt = gr.Textbox(show_label=False, info='Prompt编辑区', lines=14,
placeholder=Tips).style(container=False)
with gr.Row():
self.pro_name_txt = gr.Textbox(show_label=False, placeholder='是否全复用prompt / prompt功能名', ).style(
container=False)
self.pro_new_btn = gr.Button("保存Prompt", variant="primary").style(size='sm').style()
with gr.Row(elem_id='sm_btn'):
self.pro_reuse_btn = gr.Button("复用Result", variant="secondary").style(size='sm').style(full_width=False)
self.pro_clear_btn = gr.Button("重置Result", variant="stop").style(size='sm').style(full_width=False)
def signals_prompt_edit(self):
self.pro_clear_btn.click(fn=lambda: [], inputs=None, outputs=self.pro_results)
self.prompt_tab.select(fn=func_box.draw_results,
inputs=[self.pro_search_txt, self.pro_prompt_state, self.pro_tf_slider,
self.pro_private_check],
outputs=[self.pro_prompt_list, self.pro_prompt_state])
self.pro_search_txt.submit(fn=func_box.draw_results,
inputs=[self.pro_search_txt, self.pro_prompt_state, self.pro_tf_slider,
self.pro_private_check],
outputs=[self.pro_prompt_list, self.pro_prompt_state])
self.pro_entry_btn.click(fn=func_box.draw_results,
inputs=[self.pro_search_txt, self.pro_prompt_state, self.pro_tf_slider,
self.pro_private_check],
outputs=[self.pro_prompt_list, self.pro_prompt_state])
self.pro_prompt_list.click(fn=func_box.show_prompt_result,
inputs=[self.pro_prompt_list, self.pro_prompt_state, self.pro_results, self.pro_edit_txt, self.pro_name_txt],
outputs=[self.pro_results, self.pro_edit_txt, self.pro_name_txt])
self.pro_new_btn.click(fn=func_box.prompt_save,
inputs=[self.pro_edit_txt, self.pro_name_txt, self.pro_fp_state],
outputs=[self.pro_edit_txt, self.pro_name_txt, self.pro_private_check,
self.pro_func_prompt, self.pro_fp_state, self.tabs_chatbot])
self.pro_reuse_btn.click(
fn=func_box.reuse_chat,
inputs=[self.pro_results, self.chatbot, self.history, self.pro_name_txt, self.txt],
outputs=[self.chatbot, self.history, self.txt, self.tabs_chatbot, self.pro_name_txt, self.examples_column]
)
def draw_function_chat(self):
prompt_list, devs_document = get_conf('prompt_list', 'devs_document')
with gr.TabItem('Function', id='func_tab'):
with gr.Accordion("基础功能区", open=False) as self.area_basic_fn:
with gr.Row():
for k in functional:
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
functional[k]["Button"] = gr.Button(k, variant=variant)
with gr.Accordion("上传你的Prompt", open=False) as self.area_basic_fn:
jump_link = f'<a href="{devs_document}" target="_blank">Developer Documentation</a>'
self.pro_devs_link = gr.HTML(jump_link)
self.pro_upload_btn = gr.File(file_count='single', file_types=['.yaml', '.json'],
label=f'上传你的Prompt文件, 编写格式请遵循上述开发者文档', )
self.pro_private_check = gr.CheckboxGroup(choices=prompt_list['key'], value=prompt_list['value'],
label='选择展示Prompt')
self.pro_func_prompt = gr.Dataset(components=[gr.HTML()], label="Prompt List", visible=False,
samples=[['...', ""] for i in range(20)], type='index',
samples_per_page=10)
self.pro_fp_state = gr.State(self.pro_func_prompt)
def signals_prompt_func(self):
self.pro_private_check.select(fn=func_box.prompt_reduce,
inputs=[self.pro_private_check, self.pro_fp_state],
outputs=[self.pro_func_prompt, self.pro_fp_state, self.pro_private_check])
self.tabs_code = gr.State(0)
self.pro_func_prompt.select(fn=func_box.prompt_input,
inputs=[self.txt, self.pro_edit_txt, self.pro_name_txt, self.pro_func_prompt, self.pro_fp_state, self.tabs_code],
outputs=[self.txt, self.pro_edit_txt, self.pro_name_txt])
self.pro_upload_btn.upload(fn=func_box.prompt_upload_refresh,
inputs=[self.pro_upload_btn, self.pro_prompt_state],
outputs=[self.pro_func_prompt, self.pro_prompt_state, self.pro_private_check])
self.chat_tab.select(fn=lambda: 0, inputs=None, outputs=self.tabs_code)
self.prompt_tab.select(fn=lambda: 1, inputs=None, outputs=self.tabs_code)
def draw_public_chat(self):
with gr.TabItem('Plugins', id='plug_tab'):
with gr.Accordion("上传本地文件可供高亮函数插件调用", open=False) as self.area_file_up:
self.file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)",
file_count="multiple")
self.file_upload.style()
with gr.Accordion("函数插件区", open=True) as self.area_crazy_fn:
with gr.Row():
for k in crazy_fns:
if not crazy_fns[k].get("AsButton", True): continue
self.variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
crazy_fns[k]["Button"] = gr.Button(k, variant=self.variant)
crazy_fns[k]["Button"].style(size="sm")
with gr.Accordion("更多函数插件/高级用法", open=True, ):
dropdown_fn_list = []
for k in crazy_fns.keys():
if not crazy_fns[k].get("AsButton", True):
dropdown_fn_list.append(k)
elif crazy_fns[k].get('AdvancedArgs', False):
dropdown_fn_list.append(k)
self.dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", show_label=False, label="").style(
container=False)
self.plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
self.switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
def draw_setting_chat(self):
switch_model = get_conf('switch_model')[0]
with gr.TabItem('Settings', id='sett_tab'):
self.top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True,
label="Top-p (nucleus sampling)", ).style(container=False)
self.temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True,
label="Temperature", ).style(container=False)
self.max_length_sl = gr.Slider(minimum=256, maximum=4096, value=4096, step=1, interactive=True,
label="MaxLength", ).style(container=False)
self.pro_tf_slider = gr.Slider(minimum=0.01, maximum=1.0, value=0.70, step=0.01, interactive=True,
label="Term Frequency系数").style(container=False)
self.models_box = gr.CheckboxGroup(choices=switch_model['key'], value=switch_model['value'], label="对话模式")
self.system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt",
label="System prompt", value=self.initial_prompt)
# temp = gr.Markdown(self.description)
def draw_goals_auto(self):
with gr.Row():
self.ai_name = gr.Textbox(show_label=False, placeholder="给Ai一个名字").style(container=False)
with gr.Row():
self.ai_role = gr.Textbox(lines=5, show_label=False, placeholder="请输入你的需求").style(
container=False)
with gr.Row():
self.ai_goal_list = gr.Dataframe(headers=['Goals'], interactive=True, row_count=4,
col_count=(1, 'fixed'), type='array')
with gr.Row():
self.ai_budget = gr.Number(show_label=False, value=0.0,
info="关于本次项目的预算,超过预算自动停止,默认无限").style(container=False)
def draw_next_auto(self):
with gr.Row():
self.text_continue = gr.Textbox(visible=False, show_label=False,
placeholder="请根据提示输入执行命令").style(container=False)
with gr.Row():
self.submit_start = gr.Button("Start", variant='primary')
self.submit_next = gr.Button("Next", visible=False, variant='primary')
self.submit_stop = gr.Button("Stop", variant="stop")
self.agent_obj = gr.State({'obj': None, "start": self.submit_start,
"next": self.submit_next, "text": self.text_continue})
def signals_input_setting(self):
# 注册input
self.input_combo = [self.cookies, self.max_length_sl, self.md_dropdown,
self.input_copy, self.top_p, self.temperature, self.chatbot, self.history,
self.system_prompt, self.models_box, self.plugin_advanced_arg]
self.output_combo = [self.cookies, self.chatbot, self.history, self.status]
self.predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=self.input_combo, outputs=self.output_combo)
self.clear_agrs = dict(fn=self.__clear_input, inputs=[self.txt], outputs=[self.txt, self.input_copy,
self.examples_column])
# 提交按钮、重置按钮
self.cancel_handles.append(self.txt.submit(**self.clear_agrs).then(**self.predict_args))
self.cancel_handles.append(self.submitBtn.click(**self.clear_agrs).then(**self.predict_args))
# self.cpopyBtn.click(fn=func_box.copy_result, inputs=[self.history], outputs=[self.status])
self.resetBtn.click(lambda: ([], [], "已重置"), None, [self.chatbot, self.history, self.status])
def signals_function(self):
# 基础功能区的回调函数注册
for k in functional:
self.click_handle = functional[k]["Button"].click(**self.clear_agrs).then(fn=ArgsGeneralWrapper(predict),
inputs=[*self.input_combo, gr.State(True), gr.State(k)],
outputs=self.output_combo)
self.cancel_handles.append(self.click_handle)
def signals_public(self):
# 文件上传区接收文件后与chatbot的互动
self.file_upload.upload(on_file_uploaded, [self.file_upload, self.chatbot, self.txt], [self.chatbot, self.txt])
# 函数插件-固定按钮区
for k in crazy_fns:
if not crazy_fns[k].get("AsButton", True): continue
self.click_handle = crazy_fns[k]["Button"].click(**self.clear_agrs).then(
ArgsGeneralWrapper(crazy_fns[k]["Function"]),
[*self.input_combo, gr.State(PORT), gr.State(crazy_fns[k].get('Parameters', False))],
self.output_combo)
self.click_handle.then(on_report_generated, [self.cookies, self.file_upload, self.chatbot],
[self.cookies, self.file_upload, self.chatbot])
# self.click_handle.then(fn=lambda x: '', inputs=[], outputs=self.txt)
self.cancel_handles.append(self.click_handle)
# 函数插件-下拉菜单与随变按钮的互动
def on_dropdown_changed(k):
# 按钮颜色随变
variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
ret = {self.switchy_bt: self.switchy_bt.update(value=k, variant=variant)}
# 参数取随变
fns_value = func_box.txt_converter_json(str(crazy_fns[k].get('Parameters', '')))
fns_lable = f"插件[{k}]的高级参数说明:\n" + crazy_fns[k].get("ArgsReminder", f"没有提供高级参数功能说明")
temp_dict = dict(visible=True, interactive=True, value=str(fns_value), label=fns_lable)
# 是否唤起高级插件参数区
if crazy_fns[k].get("AdvancedArgs", False):
ret.update({self.plugin_advanced_arg: gr.update(**temp_dict)})
else:
ret.update({self.plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")})
return ret
self.dropdown.select(on_dropdown_changed, [self.dropdown], [self.switchy_bt, self.plugin_advanced_arg])
# 随变按钮的回调函数注册
def route(k, ipaddr: gr.Request, *args, **kwargs):
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
append = list(args)
append[-2] = func_box.txt_converter_json(append[-2])
append.insert(-1, ipaddr)
args = tuple(append)
yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
self.click_handle = self.switchy_bt.click(**self.clear_agrs).then(route, [self.switchy_bt, *self.input_combo, gr.State(PORT)], self.output_combo)
self.click_handle.then(on_report_generated, [self.cookies, self.file_upload, self.chatbot],
[self.cookies, self.file_upload, self.chatbot])
self.cancel_handles.append(self.click_handle)
# 终止按钮的回调函数注册
self.stopBtn.click(fn=None, inputs=None, outputs=None, cancels=self.cancel_handles)
def on_md_dropdown_changed(k):
return {self.chatbot: gr.update(label="当前模型:" + k)}
self.md_dropdown.select(on_md_dropdown_changed, [self.md_dropdown], [self.chatbot])
def signals_auto_input(self):
self.auto_input_combo = [self.ai_name, self.ai_role, self.ai_goal_list, self.ai_budget,
self.cookies, self.chatbot, self.history,
self.agent_obj]
self.auto_output_combo = [self.cookies, self.chatbot, self.history, self.status,
self.agent_obj, self.submit_start, self.submit_next, self.text_continue]
# gradio的inbrowser触发不太稳定回滚代码到原始的浏览器打开函数
def auto_opentab_delay(self, is_open=False):
import threading, webbrowser, time
print(f"如果浏览器没有自动打开请复制并转到以下URL")
print(f"\t(亮色主题): http://localhost:{PORT}")
print(f"\t(暗色主题): {self.__url}/?__theme=dark")
if is_open:
def open():
time.sleep(2) # 打开浏览器
webbrowser.open_new_tab(f"http://localhost:{PORT}/?__theme=dark")
threading.Thread(target=open, name="open-browser", daemon=True).start()
threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
# threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
def main(self):
with gr.Blocks(title="Chatbot for KSO ", theme=set_theme, analytics_enabled=False, css=custom_css) as self.demo:
# 绘制页面title
self.draw_title()
# 绘制一个ROWrow会让底下的元素自动排成一行
with gr.Row().style(justify='between'):
# 绘制列1
with gr.Column(scale=44):
with gr.Tabs() as self.tabs_copilot:
# 绘制对话模组
with gr.TabItem('Chat-Copilot'):
with gr.Row():
# self.cpopyBtn = gr.Button("复制回答", variant="secondary").style(size="sm")
self.resetBtn = gr.Button("新建对话", variant="primary", elem_id='empty_btn').style(
size="sm")
self.stopBtn = gr.Button("中止对话", variant="stop").style(size="sm")
with gr.Tabs() as self.tabs_inputs:
self.draw_function_chat()
self.draw_public_chat()
self.draw_setting_chat()
# 绘制autogpt模组
with gr.TabItem('Auto-GPT'):
self.draw_next_auto()
self.draw_goals_auto()
# 绘制列2
with gr.Column(scale=100):
with gr.Tabs() as self.tabs_chatbot:
with gr.TabItem('Chatbot', id='chatbot') as self.chat_tab:
# self.draw_chatbot()
pass
with gr.TabItem('Prompt检索/编辑') as self.prompt_tab:
self.draw_prompt()
with self.chat_tab: # 使用 gr.State()对组件进行拷贝时如果之前绘制了Markdown格式会导致启动崩溃,所以将 markdown相关绘制放在最后
self.draw_chatbot()
self.draw_examples()
with self.prompt_tab:
self.draw_temp_edit()
# 函数注册需要在Blocks下进行
self.signals_sm_btn()
self.signals_input_setting()
self.signals_function()
self.signals_prompt_func()
self.signals_public()
self.signals_prompt_edit()
# self.signals_auto_input()
adv_plugins = gr.State([i for i in crazy_fns])
self.demo.load(fn=func_box.refresh_load_data, postprocess=False,
inputs=[self.chatbot, self.history, self.pro_fp_state, adv_plugins],
outputs=[self.pro_func_prompt, self.pro_fp_state, self.chatbot, self.history, self.guidance_plugins, self.guidance_plugins_state])
# Start
self.auto_opentab_delay()
self.demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION,
blocked_paths=["config.py", "config_private.py", "docker-compose.yml", "Dockerfile"])
def check_proxy_free():
proxy_state = func_box.Shell(f'lsof -i :{PORT}').read()[1].splitlines()
if proxy_state != ["", ""]:
print('Kill Old Server')
for i in proxy_state[1:]:
func_box.Shell(f'kill -9 {i.split()[1]}').read()
import time
time.sleep(5)
if __name__ == '__main__':
# PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
PORT = LOCAL_PORT if WEB_PORT <= 0 else WEB_PORT
check_proxy_free()
ChatBot().main()
gr.close_all()
check_proxy_free()

check_proxy.py
View File

@ -1,21 +1,23 @@
def check_proxy(proxies):
def check_proxy(proxies: dict):
import requests
proxies_https = proxies['https'] if proxies is not None else ''
proxies_https = proxies.get('https') if proxies is not None else ''
try:
response = requests.get("https://ipapi.co/json/",
proxies=proxies, timeout=4)
proxies=proxies, timeout=30)
data = response.json()
print(f'查询代理的地理位置,返回的结果是{data}')
if 'country_name' in data:
country = data['country_name']
result = f"代理配置 {proxies_https}, 代理所在地:{country}"
elif 'error' in data:
result = f"代理配置 {proxies_https}, 代理所在地:未知"
result = f"代理配置 {proxies_https}, 代理所在地:未知IP查询频率受限"
else:
result = f"代理配置 {proxies_https}, 代理数据解析失败:{data}"
print(result)
return result
except:
result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
except Exception as e:
result = f"代理 {proxies_https} 查询出现异常: {e},代理可能无效"
print(result)
return result

config.py
View File

@ -2,27 +2,8 @@
API_KEY = "sk-此处填API密钥" # 可同时填写多个API-KEY用英文逗号分割例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"
prompt_list = {'key': ['所有人', '个人'], 'value': []}
switch_model = {'key': ['input加密', '隐私模式'], 'value': ['input加密']}
private_key = 'uhA51pHtjisfjij'
import func_box
import os
devs_document = "/file="+os.path.join(func_box.base_path, 'README.md')
#增加关于AZURE的配置信息 可以在AZURE网页中找到
AZURE_ENDPOINT = "https://你的api名称.openai.azure.com/"
AZURE_API_KEY = "填入azure openai api的密钥"
AZURE_API_VERSION = "填入api版本"
AZURE_ENGINE = "填入ENGINE"
# [step 2]>> 改为True应用代理如果直接在海外服务器部署此处不修改
USE_PROXY = False
LOCAL_PORT = 7891
if USE_PROXY:
# 填写格式是 [协议]:// [地址] :[端口]填写之前不要忘记把USE_PROXY改成True如果直接在海外服务器部署此处不修改
# 例如 "socks5h://localhost:11284"
@ -44,13 +25,10 @@ else:
DEFAULT_WORKER_NUM = 3
# [step 3]>> 以下配置可以优化体验,但大部分场合下并不需要修改 # 废弃了移步到theme.py 的 #main_chatbot中修改
# [step 4]>> 以下配置可以优化体验,但大部分场合下并不需要修改
# 对话窗的高度
CHATBOT_HEIGHT = 1115
# 主题
THEME = "Default"
# 代码高亮
CODE_HIGHLIGHT = True
@ -69,17 +47,12 @@ MAX_RETRY = 2
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 同时它必须被包含在AVAIL_LLM_MODELS切换列表中 )
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt35", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "newbing-free", "stack-claude"]
# P.S. 其他可用的模型还包括 ["newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
# P.S. 其他可用的模型还包括 ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
# OpenAI的API_URL
API_URL = "https://api.openai.com/v1/chat/completions"
PROXY_API_URL = '' # 你的网关应用
# 设置gradio的并行线程数不需要修改
CONCURRENT_COUNT = 100
@ -89,9 +62,6 @@ AUTO_CLEAR_TXT = False
# 加一个live2d装饰
ADD_WAIFU = False
# 川虎JS
ADD_CHUANHU = True
# 设置用户名和密码不需要修改相关功能不稳定与gradio版本和网络都相关如果本地使用不建议加这个
# [("username", "password"), ("username2", "password2"), ...]
AUTHENTICATION = []

core_functional.py
View File

@ -61,7 +61,7 @@ def get_core_functions():
},
"找图片": {
"Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL" +
r"然后请使用Markdown格式封装并且不要有反斜线不要用代码块。现在请按以下描述给我发送图片" + "\n",
r"然后请使用Markdown格式封装并且不要有反斜线不要用代码块。现在请按以下描述给我发送图片" + "\n\n",
"Suffix": r"",
"Visible": False,
},
@ -76,11 +76,3 @@ def get_core_functions():
"Suffix": r"",
}
}
def get_guidance():
pass
def get_guidance():
pass

crazy_functional.py
View File

@ -20,28 +20,19 @@ def get_crazy_functions():
from crazy_functions.解析项目源代码 import 解析一个Lua项目
from crazy_functions.解析项目源代码 import 解析一个CSharp项目
from crazy_functions.总结word文档 import 总结word文档
from crazy_functions.辅助回答 import 猜你想问
from crazy_functions.解析JupyterNotebook import 解析ipynb文件
from crazy_functions.对话历史存档 import 对话历史存档
from crazy_functions.对话历史存档 import 载入对话历史存档
from crazy_functions.对话历史存档 import 删除所有本地对话历史记录
from crazy_functions.批量Markdown翻译 import Markdown英译中
function_plugins = {
"猜你想问": {
"Function": HotReload(猜你想问)
},
"解析整个Python项目": {
"Color": "primary", # 按钮颜色
"AsButton": False,
"Color": "stop", # 按钮颜色
"Function": HotReload(解析一个Python项目)
},
"保存当前的对话": {
"AsButton": True,
"Function": HotReload(对话历史存档)
},
"载入对话历史存档(先上传存档或输入路径)": {
"Color": "primary",
"Color": "stop",
"AsButton":False,
"Function": HotReload(载入对话历史存档)
},
@ -49,78 +40,77 @@ def get_crazy_functions():
"AsButton":False,
"Function": HotReload(删除所有本地对话历史记录)
},
"[测试功能] 解析Jupyter Notebook文件": {
"Color": "primary",
"AsButton": False,
"Color": "stop",
"AsButton":False,
"Function": HotReload(解析ipynb文件),
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "若输入0则不解析notebook中的Markdown块", # 高级参数输入区的显示提示
},
"批量总结Word文档": {
"AsButton": False,
"Color": "primary",
"Color": "stop",
"Function": HotReload(总结word文档)
},
"解析整个C++项目头文件": {
"Color": "primary", # 按钮颜色
"Color": "stop", # 按钮颜色
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(解析一个C项目的头文件)
},
"解析整个C++项目(.cpp/.hpp/.c/.h": {
"Color": "primary", # 按钮颜色
"Color": "stop", # 按钮颜色
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(解析一个C项目)
},
"解析整个Go项目": {
"Color": "primary", # 按钮颜色
"Color": "stop", # 按钮颜色
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(解析一个Golang项目)
},
"解析整个Rust项目": {
"Color": "primary", # 按钮颜色
"Color": "stop", # 按钮颜色
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(解析一个Rust项目)
},
"解析整个Java项目": {
"Color": "primary", # 按钮颜色
"Color": "stop", # 按钮颜色
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(解析一个Java项目)
},
"解析整个前端项目js,ts,css等": {
"Color": "primary", # 按钮颜色
"Color": "stop", # 按钮颜色
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(解析一个前端项目)
},
"解析整个Lua项目": {
"Color": "primary", # 按钮颜色
"Color": "stop", # 按钮颜色
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(解析一个Lua项目)
},
"解析整个CSharp项目": {
"Color": "primary", # 按钮颜色
"Color": "stop", # 按钮颜色
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(解析一个CSharp项目)
},
"读Tex论文写摘要": {
"Color": "primary", # 按钮颜色
"AsButton": False, # 加入下拉菜单中
"Color": "stop", # 按钮颜色
"Function": HotReload(读文章写摘要)
},
"Markdown/Readme英译中": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Color": "primary",
"AsButton": False,
"Color": "stop",
"Function": HotReload(Markdown英译中)
},
"批量生成函数注释": {
"Color": "primary", # 按钮颜色
"Color": "stop", # 按钮颜色
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(批量生成函数注释)
},
"保存当前的对话": {
"Function": HotReload(对话历史存档)
},
"[多线程Demo] 解析此项目本身(源码自译解)": {
"Function": HotReload(解析项目本身),
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(解析项目本身)
},
# "[老旧的Demo] 把本项目源代码切换成全英文": {
# # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
@ -129,8 +119,7 @@ def get_crazy_functions():
# },
"[插件demo] 历史上的今天": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Function": HotReload(高阶功能模板函数),
"AsButton": False,
"Function": HotReload(高阶功能模板函数)
},
}
@ -149,69 +138,69 @@ def get_crazy_functions():
function_plugins.update({
"批量翻译PDF文档多线程": {
"Color": "primary",
"AsButton": False, # 加入下拉菜单中
"Color": "stop",
"AsButton": True, # 加入下拉菜单中
"Function": HotReload(批量翻译PDF文档)
},
"询问多个GPT模型": {
"Color": "primary", # 按钮颜色
"Color": "stop", # 按钮颜色
"Function": HotReload(同时问询)
},
"[测试功能] 批量总结PDF文档": {
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Function": HotReload(批量总结PDF文档)
},
# "[测试功能] 批量总结PDF文档pdfminer": {
# "Color": "primary",
# "Color": "stop",
# "AsButton": False, # 加入下拉菜单中
# "Function": HotReload(批量总结PDF文档pdfminer)
# },
"谷歌学术检索助手输入谷歌学术搜索页url": {
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(谷歌检索小助手)
},
"理解PDF文档内容 模仿ChatPDF": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Color": "primary",
"AsButton": True, # 加入下拉菜单中
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(理解PDF文档内容标准文件输入)
},
"英文Latex项目全文润色输入路径或上传压缩包": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(Latex英文润色)
},
"英文Latex项目全文纠错输入路径或上传压缩包": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(Latex英文纠错)
},
"中文Latex项目全文润色输入路径或上传压缩包": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(Latex中文润色)
},
"Latex项目全文中译英输入路径或上传压缩包": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(Latex中译英)
},
"Latex项目全文英译中输入路径或上传压缩包": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(Latex英译中)
},
"批量Markdown中译英输入路径或上传压缩包": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(Markdown中译英)
},
@ -221,11 +210,12 @@ def get_crazy_functions():
###################### 第三组插件 ###########################
# [第三组插件]: 尚未充分测试的函数插件
try:
from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
function_plugins.update({
"一键下载arxiv论文并翻译摘要先在input输入编号如1812.10695": {
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(下载arxiv论文并翻译摘要)
}
@ -237,7 +227,7 @@ def get_crazy_functions():
from crazy_functions.联网的ChatGPT import 连接网络回答问题
function_plugins.update({
"连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(连接网络回答问题)
}
@ -245,7 +235,7 @@ def get_crazy_functions():
from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
function_plugins.update({
"连接网络回答问题中文Bing版输入问题后点击该插件": {
"Color": "primary",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(连接bing搜索回答问题)
}
@ -257,7 +247,7 @@ def get_crazy_functions():
from crazy_functions.解析项目源代码 import 解析任意code项目
function_plugins.update({
"解析项目源代码(手动指定和筛选源代码文件类型)": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
@ -271,7 +261,7 @@ def get_crazy_functions():
from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
function_plugins.update({
"询问多个GPT模型手动指定询问哪些模型": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "支持任意数量的llm接口用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
@ -285,7 +275,7 @@ def get_crazy_functions():
from crazy_functions.图片生成 import 图片生成
function_plugins.update({
"图片生成先切换模型到openai或api2d": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "在这里输入分辨率, 如256x256默认", # 高级参数输入区的显示提示
@ -299,7 +289,7 @@ def get_crazy_functions():
from crazy_functions.总结音视频 import 总结音视频
function_plugins.update({
"批量总结音视频(输入路径或上传压缩包)": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示例如解析为简体中文默认",
@ -309,51 +299,11 @@ def get_crazy_functions():
except:
print('Load function plugin failed')
from crazy_functions.解析项目源代码 import 解析任意code项目
function_plugins.update({
"解析项目源代码(手动指定和筛选源代码文件类型)": {
"Color": "primary",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
"Function": HotReload(解析任意code项目)
},
})
from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
function_plugins.update({
"询问多个GPT模型手动指定询问哪些模型": {
"Color": "primary",
"AsButton": False,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "支持任意数量的llm接口用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
"Function": HotReload(同时问询_指定模型)
},
})
from crazy_functions.图片生成 import 图片生成
function_plugins.update({
"图片生成先切换模型到openai或api2d": {
"Color": "primary",
"AsButton": True,
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "在这里输入分辨率, 如'256x256'(默认), '512x512', '1024x1024'", # 高级参数输入区的显示提示
"Function": HotReload(图片生成)
},
})
from crazy_functions.总结音视频 import 总结音视频
function_plugins.update({
"批量总结音视频(输入路径或上传压缩包)": {
"Color": "primary",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示例如解析为简体中文默认",
"Function": HotReload(总结音视频)
}
})
try:
from crazy_functions.数学动画生成manim import 动画生成
function_plugins.update({
"数学动画生成Manim": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"Function": HotReload(动画生成)
}
@ -365,7 +315,7 @@ def get_crazy_functions():
from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
function_plugins.update({
"Markdown翻译手动指定语言": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "请输入要翻译成哪种语言默认为Chinese。",
@ -379,7 +329,7 @@ def get_crazy_functions():
from crazy_functions.Langchain知识库 import 知识库问答
function_plugins.update({
"[功能尚不稳定] 构建知识库(请先上传文件素材)": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "待注入的知识库名称id, 默认为default",
@ -393,7 +343,7 @@ def get_crazy_functions():
from crazy_functions.Langchain知识库 import 读取知识库作答
function_plugins.update({
"[功能尚不稳定] 知识库问答": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要首先调用构建知识库",
@ -407,7 +357,7 @@ def get_crazy_functions():
from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
function_plugins.update({
"Latex英文纠错+高亮修正位置 [需Latex]": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
@ -417,7 +367,7 @@ def get_crazy_functions():
from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
function_plugins.update({
"Arixv翻译输入arxivID[需Latex]": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder":
@ -428,7 +378,7 @@ def get_crazy_functions():
})
function_plugins.update({
"本地论文翻译上传Latex压缩包[需Latex]": {
"Color": "primary",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder":
@ -444,7 +394,7 @@ def get_crazy_functions():
# from crazy_functions.虚空终端 import 终端
# function_plugins.update({
# "超级终端": {
# "Color": "primary",
# "Color": "stop",
# "AsButton": False,
# # "AdvancedArgs": True,
# # "ArgsReminder": "",
@ -454,5 +404,4 @@ def get_crazy_functions():
# except:
# print('Load function plugin failed')
###################### 第n组插件 ###########################
return function_plugins

View File

@ -1,16 +1,19 @@
from toolbox import update_ui, get_conf, trimmed_format_exc
import threading
def input_clipping(inputs, history, max_token_limit):
import numpy as np
from request_llm.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
def get_token_num(txt):
return len(enc.encode(txt, disallowed_special=()))
mode = 'input-and-history'
# 当 输入部分的token占比 小于 全文的一半时,只裁剪历史
input_token_num = get_token_num(inputs)
if input_token_num < max_token_limit//2:
if input_token_num < max_token_limit // 2:
mode = 'only-history'
max_token_limit = max_token_limit - input_token_num
@ -18,13 +21,13 @@ def input_clipping(inputs, history, max_token_limit):
everything.extend(history)
n_token = get_token_num('\n'.join(everything))
everything_token = [get_token_num(e) for e in everything]
delta = max(everything_token) // 16 # 截断时的颗粒度
delta = max(everything_token) // 16 # 截断时的颗粒度
while n_token > max_token_limit:
where = np.argmax(everything_token)
encoded = enc.encode(everything[where], disallowed_special=())
clipped_encoded = encoded[:len(encoded)-delta]
everything[where] = enc.decode(clipped_encoded)[:-1] # -1 to remove the may-be illegal char
clipped_encoded = encoded[:len(encoded) - delta]
everything[where] = enc.decode(clipped_encoded)[:-1] # -1 to remove the may-be illegal char
everything_token[where] = get_token_num(everything[where])
n_token = get_token_num('\n'.join(everything))
@ -35,12 +38,13 @@ def input_clipping(inputs, history, max_token_limit):
history = everything[1:]
return inputs, history
def request_gpt_model_in_new_thread_with_ui_alive(
inputs, inputs_show_user, llm_kwargs,
chatbot, history, sys_prompt, refresh_interval=0.2,
handle_token_exceed=True,
retry_times_at_unknown_error=2,
):
):
"""
Request GPT model请求GPT模型同时维持用户界面活跃。
@ -64,15 +68,16 @@ def request_gpt_model_in_new_thread_with_ui_alive(
from request_llm.bridge_all import predict_no_ui_long_connection
# 用户反馈
chatbot.append([inputs_show_user, ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
executor = ThreadPoolExecutor(max_workers=16)
mutable = ["", time.time(), ""]
def _req_gpt(inputs, history, sys_prompt):
retry_op = retry_times_at_unknown_error
exceeded_cnt = 0
while True:
# watchdog error
if len(mutable) >= 2 and (time.time()-mutable[1]) > 5:
if len(mutable) >= 2 and (time.time() - mutable[1]) > 5:
raise RuntimeError("检测到程序终止。")
try:
# 【第一种情况】:顺利完成
@ -89,14 +94,14 @@ def request_gpt_model_in_new_thread_with_ui_alive(
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
MAX_TOKEN = 4096
EXCEED_ALLO = 512 + 512 * exceeded_cnt
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN - EXCEED_ALLO)
mutable[0] += f'[Local Message] 警告文本过长将进行截断Token溢出数{n_exceed}\n\n'
continue # 返回重试
continue # 返回重试
else:
# 【选择放弃】
tb_str = '```\n' + trimmed_format_exc() + '```'
mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback\n\n{tb_str}\n\n"
return mutable[0] # 放弃
return mutable[0] # 放弃
except:
# 【第三种情况】:其他错误:重试几次
tb_str = '```\n' + trimmed_format_exc() + '```'
@ -104,14 +109,15 @@ def request_gpt_model_in_new_thread_with_ui_alive(
mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback\n\n{tb_str}\n\n"
if retry_op > 0:
retry_op -= 1
mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}\n\n"
mutable[
0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error - retry_op}/{retry_times_at_unknown_error}\n\n"
if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
time.sleep(30)
time.sleep(5)
continue # 返回重试
continue # 返回重试
else:
time.sleep(5)
return mutable[0] # 放弃
return mutable[0] # 放弃
# 提交任务
future = executor.submit(_req_gpt, inputs, history, sys_prompt)
@ -123,11 +129,11 @@ def request_gpt_model_in_new_thread_with_ui_alive(
if future.done():
break
chatbot[-1] = [chatbot[-1][0], mutable[0]]
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
final_result = future.result()
chatbot[-1] = [chatbot[-1][0], final_result]
yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
return final_result
@ -137,7 +143,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
handle_token_exceed=True, show_user_at_complete=False,
retry_times_at_unknown_error=2,
):
):
"""
Request GPT model using multiple threads with UI and high efficiency
请求GPT模型的[多线程]版。
@ -170,21 +176,23 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
from request_llm.bridge_all import predict_no_ui_long_connection
assert len(inputs_array) == len(history_array)
assert len(inputs_array) == len(sys_prompt_array)
if max_workers == -1: # 读取配置文件
try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
except: max_workers = 8
if max_workers == -1: # 读取配置文件
try:
max_workers, = get_conf('DEFAULT_WORKER_NUM')
except:
max_workers = 8
if max_workers <= 0: max_workers = 3
# 屏蔽掉 chatglm的多线程可能会导致严重卡顿
if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-') or llm_kwargs['llm_model'].startswith('proxy-gpt')):
if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-')):
max_workers = 1
executor = ThreadPoolExecutor(max_workers=max_workers)
n_frag = len(inputs_array)
# 用户反馈
chatbot.append([None, ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
chatbot.append(["请开始多线程操作。", ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
# 跨线程传递
mutable = [[f"", time.time(), "等待中"] for _ in range(n_frag)]
mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
# 子线程任务
def _req_gpt(index, inputs, history, sys_prompt):
@ -194,7 +202,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
mutable[index][2] = "执行中"
while True:
# watchdog error
if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > 5:
if len(mutable[index]) >= 2 and (time.time() - mutable[index][1]) > 5:
raise RuntimeError("检测到程序终止。")
try:
# 【第一种情况】:顺利完成
@ -214,23 +222,25 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
MAX_TOKEN = 4096
EXCEED_ALLO = 512 + 512 * exceeded_cnt
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN - EXCEED_ALLO)
gpt_say += f'[Local Message] 警告文本过长将进行截断Token溢出数{n_exceed}\n\n'
mutable[index][2] = f"截断重试"
continue # 返回重试
continue # 返回重试
else:
# 【选择放弃】
tb_str = '```\n' + trimmed_format_exc() + '```'
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback\n\n{tb_str}\n\n"
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
if len(mutable[index][0]) > 0:
gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
mutable[index][2] = "输入过长已放弃"
return gpt_say # 放弃
except:
return gpt_say # 放弃
except Exception as e:
# 【第三种情况】:其他错误
tb_str = '```\n' + trimmed_format_exc() + '```'
print(tb_str)
print(f"发生异常:{e}, 调用栈信息:{tb_str}")
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback\n\n{tb_str}\n\n"
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
if len(mutable[index][0]) > 0:
gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
if retry_op > 0:
retry_op -= 1
wait = random.randint(5, 20)
@ -241,19 +251,22 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
fail_info = ""
# 也许等待十几秒后,情况会好转
for i in range(wait):
mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
mutable[index][2] = f"{fail_info}等待重试 {wait - i}";
time.sleep(1)
# 开始重试
mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
continue # 返回重试
mutable[index][
2] = f"重试中 {retry_times_at_unknown_error - retry_op}/{retry_times_at_unknown_error}"
continue # 返回重试
else:
mutable[index][2] = "已失败"
wait = 5
time.sleep(5)
return gpt_say # 放弃
return gpt_say # 放弃
# 异步任务开始
futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in
zip(
range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
cnt = 0
while True:
# yield一次以刷新前端页面
@ -267,18 +280,17 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
mutable[thread_index][1] = time.time()
# 在前端打印些好玩的东西
for thread_index, _ in enumerate(worker_done):
print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
print_something_really_funny = "[ ...`" + mutable[thread_index][0][-scroller_max_len:]. \
replace('\n', '').replace('```', '...').replace(
' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
' ', '.').replace('<br/>', '.....').replace('$', '.') + "`... ]"
observe_win.append(print_something_really_funny)
# 在前端打印些好玩的东西
stat_str = ''.join([f'`{inputs_show_user_array[thread_index][0:5]}...{inputs_show_user_array[thread_index][-5:]}`\t'
f'`{mutable[thread_index][2]}`: {obs}\n\n'
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
if not done else f'`{mutable[thread_index][2]}`\n\n'
for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
# 在前端打印些好玩的东西
chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.'] * (cnt % 10 + 1))]
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
if all(worker_done):
executor.shutdown()
break
@ -294,7 +306,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
for inputs_show_user, f in zip(inputs_show_user_array, futures):
gpt_res = f.result()
chatbot.append([inputs_show_user, gpt_res])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
time.sleep(0.3)
return gpt_response_collection
@ -307,6 +319,7 @@ def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
lines = txt_tocut.split('\n')
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
estimated_line_cut = int(estimated_line_cut)
cnt = 0
for cnt in reversed(range(estimated_line_cut)):
if must_break_at_empty_line:
if lines[cnt] != "":
@ -323,6 +336,7 @@ def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
result = [prev]
result.extend(cut(post, must_break_at_empty_line))
return result
try:
return cut(txt, must_break_at_empty_line=True)
except RuntimeError:
@ -338,6 +352,7 @@ def force_breakdown(txt, limit, get_token_fn):
return txt[:i], txt[i:]
return "Tiktoken未知错误", "Tiktoken未知错误"
def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
# 递归
def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
@ -366,6 +381,7 @@ def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
result = [prev]
result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
return result
try:
# 第1次尝试将双空行\n\n作为切分点
return cut(txt, must_break_at_empty_line=True)
@ -376,7 +392,7 @@ def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
except RuntimeError:
try:
# 第3次尝试将英文句号.)作为切分点
res = cut(txt.replace('.', '\n'), must_break_at_empty_line=False) # 这个中文的句号是故意的,作为一个标识而存在
res = cut(txt.replace('.', '\n'), must_break_at_empty_line=False) # 这个中文的句号是故意的,作为一个标识而存在
return [r.replace('\n', '.') for r in res]
except RuntimeError as e:
try:
@ -388,7 +404,6 @@ def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
return cut(txt, must_break_at_empty_line=False, break_anyway=True)
def read_and_clean_pdf_text(fp):
"""
这个函数用于分割pdf用了很多trick逻辑较乱效果奇好
@ -416,8 +431,9 @@ def read_and_clean_pdf_text(fp):
fc = 0 # Index 0 文本
fs = 1 # Index 1 字体
fb = 2 # Index 2 框框
REMOVE_FOOT_NOTE = True # 是否丢弃掉 不是正文的内容 (比正文字体小,如参考文献、脚注、图注等)
REMOVE_FOOT_FFSIZE_PERCENT = 0.95 # 小于正文的判定为不是正文有些文章的正文部分字体大小不是100%统一的,有肉眼不可见的小变化)
REMOVE_FOOT_NOTE = True # 是否丢弃掉 不是正文的内容 (比正文字体小,如参考文献、脚注、图注等)
REMOVE_FOOT_FFSIZE_PERCENT = 0.95 # 小于正文的判定为不是正文有些文章的正文部分字体大小不是100%统一的,有肉眼不可见的小变化)
def primary_ffsize(l):
"""
提取文本块主字体
@ -428,11 +444,11 @@ def read_and_clean_pdf_text(fp):
fsize_statiscs[wtf['size']] += len(wtf['text'])
return max(fsize_statiscs, key=fsize_statiscs.get)
def ffsize_same(a,b):
def ffsize_same(a, b):
"""
提取字体大小是否近似相等
"""
return abs((a-b)/max(a,b)) < 0.02
return abs((a - b) / max(a, b)) < 0.02
with fitz.open(fp) as doc:
meta_txt = []
@ -452,14 +468,15 @@ def read_and_clean_pdf_text(fp):
if len(txt_line) == 0: continue
pf = primary_ffsize(l)
meta_line.append([txt_line, pf, l['bbox'], l])
for wtf in l['spans']: # for l in t['lines']:
for wtf in l['spans']: # for l in t['lines']:
meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
# meta_line.append(["NEW_BLOCK", pf])
# 块元提取 for each word segment with in line for each line cross-line words for each block
# 块元提取 for each word segment with in line for each line
# cross-line words for each block
meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
'- ', '') for t in text_areas['blocks'] if 'lines' in t])
meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
if index == 0:
page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
'- ', '') for t in text_areas['blocks'] if 'lines' in t]
@ -483,26 +500,27 @@ def read_and_clean_pdf_text(fp):
if REMOVE_FOOT_NOTE:
if meta_line[index][fs] <= give_up_fize_threshold:
continue
if ffsize_same(meta_line[index][fs], meta_line[index-1][fs]):
if ffsize_same(meta_line[index][fs], meta_line[index - 1][fs]):
# 尝试识别段落
if meta_line[index][fc].endswith('.') and\
(meta_line[index-1][fc] != 'NEW_BLOCK') and \
(meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7:
if meta_line[index][fc].endswith('.') and \
(meta_line[index - 1][fc] != 'NEW_BLOCK') and \
(meta_line[index][fb][2] - meta_line[index][fb][0]) < (
meta_line[index - 1][fb][2] - meta_line[index - 1][fb][0]) * 0.7:
sec[-1] += line[fc]
sec[-1] += "\n\n"
else:
sec[-1] += " "
sec[-1] += line[fc]
else:
if (index+1 < len(meta_line)) and \
meta_line[index][fs] > main_fsize:
if (index + 1 < len(meta_line)) and \
meta_line[index][fs] > main_fsize:
# 单行 + 字体大
mega_sec.append(copy.deepcopy(sec))
sec = []
sec.append("# " + line[fc])
else:
# 尝试识别section
if meta_line[index-1][fs] > meta_line[index][fs]:
if meta_line[index - 1][fs] > meta_line[index][fs]:
sec.append("\n" + line[fc])
else:
sec.append(line[fc])
@ -521,13 +539,15 @@ def read_and_clean_pdf_text(fp):
if len(block_txt) < 100:
meta_txt[index] = '\n'
return meta_txt
meta_txt = 把字符太少的块清除为回车(meta_txt)
def 清理多余的空行(meta_txt):
for index in reversed(range(1, len(meta_txt))):
if meta_txt[index] == '\n' and meta_txt[index-1] == '\n':
if meta_txt[index] == '\n' and meta_txt[index - 1] == '\n':
meta_txt.pop(index)
return meta_txt
meta_txt = 清理多余的空行(meta_txt)
def 合并小写开头的段落块(meta_txt):
@ -538,16 +558,18 @@ def read_and_clean_pdf_text(fp):
return True
else:
return False
for _ in range(100):
for index, block_txt in enumerate(meta_txt):
if starts_with_lowercase_word(block_txt):
if meta_txt[index-1] != '\n':
meta_txt[index-1] += ' '
if meta_txt[index - 1] != '\n':
meta_txt[index - 1] += ' '
else:
meta_txt[index-1] = ''
meta_txt[index-1] += meta_txt[index]
meta_txt[index - 1] = ''
meta_txt[index - 1] += meta_txt[index]
meta_txt[index] = '\n'
return meta_txt
meta_txt = 合并小写开头的段落块(meta_txt)
meta_txt = 清理多余的空行(meta_txt)
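A condensed, runnable sketch of the 清理多余的空行 pass above, on hypothetical input; iterating in reverse keeps the indices of still-unvisited items stable while popping:

def remove_redundant_blank_blocks(meta_txt):
    for index in reversed(range(1, len(meta_txt))):
        # Collapse runs of consecutive newline-only blocks into a single one.
        if meta_txt[index] == '\n' and meta_txt[index - 1] == '\n':
            meta_txt.pop(index)
    return meta_txt

print(remove_redundant_blank_blocks(['Intro', '\n', '\n', '\n', 'Body']))
# -> ['Intro', '\n', 'Body']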
@@ -567,7 +589,7 @@ def read_and_clean_pdf_text(fp):
return meta_txt, page_one_meta
def get_files_from_everything(txt, type): # type='.md'
def get_files_from_everything(txt, type): # type='.md'
"""
This function collects every file of a given type (e.g. .md) under a directory; it can also fetch a single file from the web.
The parameters and return values are described below:
@@ -589,9 +611,10 @@ def get_files_from_everything(txt, type): # type='.md'
from toolbox import get_conf
proxies, = get_conf('proxies')
r = requests.get(txt, proxies=proxies)
with open('./gpt_log/temp'+type, 'wb+') as f: f.write(r.content)
with open('./gpt_log/temp' + type, 'wb+') as f:
f.write(r.content)
project_folder = './gpt_log/'
file_manifest = ['./gpt_log/temp'+type]
file_manifest = ['./gpt_log/temp' + type]
elif txt.endswith(type):
# a file path was given directly
file_manifest = [txt]
@@ -599,7 +622,7 @@ def get_files_from_everything(txt, type): # type='.md'
elif os.path.exists(txt):
# local path: search recursively
project_folder = txt
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*'+type, recursive=True)]
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*' + type, recursive=True)]
if len(file_manifest) == 0:
success = False
else:
@@ -610,8 +633,6 @@ def get_files_from_everything(txt, type): # type='.md'
return success, file_manifest, project_folder
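A hedged usage sketch of get_files_from_everything, matching the branches shown above (the paths, URL, and the .md filter are illustrative):

# Local directory: recursively collect every .md file under the folder.
success, file_manifest, project_folder = get_files_from_everything('./docs', type='.md')

# Remote file: a URL ending in .md is downloaded to ./gpt_log/temp.md first
# (assuming proxies are configured via toolbox.get_conf as shown above).
success, file_manifest, project_folder = get_files_from_everything(
    'https://example.com/README.md', type='.md')

if success:
    for fp in file_manifest:
        print(fp)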
def Singleton(cls):
_instance = {}
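The hunk cuts the Singleton decorator off right after its _instance cache; a typical dict-keyed implementation consistent with that first line (a sketch, not necessarily the repository's exact body) looks like:

def Singleton(cls):
    _instance = {}

    def _singleton(*args, **kwargs):
        # Construct the wrapped class once, then always hand back the cached object.
        if cls not in _instance:
            _instance[cls] = cls(*args, **kwargs)
        return _instance[cls]

    return _singleton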
@@ -638,12 +659,11 @@ class knowledge_archive_interface():
from toolbox import ProxyNetworkActivate
print('Checking Text2vec ...')
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
with ProxyNetworkActivate(): # temporarily activate the proxy network
with ProxyNetworkActivate():  # temporarily activate the proxy network
self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
return self.text2vec_large_chinese
def feed_archive(self, file_manifest, id="default"):
self.threadLock.acquire()
# import uuid
@@ -656,7 +676,7 @@ class knowledge_archive_interface():
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
text2vec=self.get_chinese_text2vec(),
)
self.threadLock.release()
@@ -678,23 +698,24 @@ class knowledge_archive_interface():
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
text2vec=self.get_chinese_text2vec(),
)
VECTOR_SEARCH_SCORE_THRESHOLD = 0
VECTOR_SEARCH_TOP_K = 4
CHUNK_SIZE = 512
resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
query = txt,
vs_path = self.kai_path,
query=txt,
vs_path=self.kai_path,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K,
chunk_conent=True,
chunk_size=CHUNK_SIZE,
text2vec = self.get_chinese_text2vec(),
text2vec=self.get_chinese_text2vec(),
)
self.threadLock.release()
return resp, prompt
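One design note on the locking above: bare acquire()/release() pairs leave threadLock held forever if the body raises, stalling every later call. A with block releases the lock even on exceptions; a generic sketch (not the repository's code):

import threading

lock = threading.Lock()

def guarded(work):
    # The context manager releases the lock even if work() raises.
    with lock:
        return work()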
def try_install_deps(deps):
for dep in deps:
import subprocess, sys
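The hunk truncates try_install_deps right after its imports; a typical body (a sketch under that assumption, using only standard-library calls) installs each dependency with the running interpreter's own pip:

import subprocess
import sys

def try_install_deps(deps):
    for dep in deps:
        # Use the current interpreter so packages land in the right environment.
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', dep])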
@@ -657,7 +657,6 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder)
# <-------- write out the files ---------->
msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}"
final_tex = lps.merge_result(pfg.file_result, mode, msg)
@@ -744,7 +743,6 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # refresh the Gradio frontend
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
@@ -769,7 +767,6 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path
if os.path.exists(pj(work_folder, '..', 'translation')):
shutil.copyfile(result_pdf, pj(work_folder, '..', 'translation', 'translate_zh.pdf'))
promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
return True # success!
else:
@@ -27,22 +27,20 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"):
}
response = requests.post(url, headers=headers, json=data, proxies=proxies)
print(response.content)
try:
image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
except:
raise RuntimeError(response.content.decode())
# save the file locally
r = requests.get(image_url, proxies=proxies)
file_path = 'gpt_log/image_gen/'
os.makedirs(file_path, exist_ok=True)
file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png'
with open(file_path + file_name, 'wb+') as f:
f.write(r.content)
return image_url, file_path + file_name
with open(file_path+file_name, 'wb+') as f: f.write(r.content)
return image_url, file_path+file_name
@CatchException
@@ -53,10 +53,9 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
)
iteration_results.append(gpt_say)
last_iteration_result = gpt_say
############################## <Step 3: organize history> ##################################
final_results.extend(iteration_results)
# Append the summary to the history so the "猜你想问" (guess-your-question) feature can use it
history.extend([last_iteration_result])
final_results.append(f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。')
# The next two messages are display-only and have no real effect
i_say_show_user = f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。'; gpt_say = "[Local Message] 收到。"
@@ -113,4 +112,3 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat
txt = file_manifest[0]
# start the actual task
yield from 解析PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -144,13 +144,3 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
yield from ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, )
if __name__ == '__main__':
import json
filename = ''
code = parseNotebook(filename)
print(code)
with open(filename, 'r', encoding='utf-8', errors='replace') as f:
notebook = f.read()
print(notebook)
@@ -1,67 +1,78 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from toolbox import update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
fast_debug = False
def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
import time, glob, os
import time
import os
print('begin analysis on:', file_manifest)
for index, fp in enumerate(file_manifest):
with open(fp, 'r', encoding='utf-8', errors='replace') as f:
file_content = f.read()
prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index == 0 else ""
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
if not fast_debug:
msg = '正常'
# ** gpt request **
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # with countdown timeout
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs,
chatbot, history=[],
sys_prompt=system_prompt) # with countdown timeout
chatbot[-1] = (i_say_show_user, gpt_say)
history.append(i_say_show_user); history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
history.append(i_say_show_user);
history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
if not fast_debug: time.sleep(2)
all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
chatbot.append((i_say, "[Local Message] waiting gpt response."))
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
if not fast_debug:
msg = '正常'
# ** gpt request **
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt) # with countdown timeout
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot,
history=history,
sys_prompt=system_prompt) # with countdown timeout
chatbot[-1] = (i_say, gpt_say)
history.append(i_say); history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
history.append(i_say)
history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
res = write_results_to_file(history)
chatbot.append(("完成了吗?", res))
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
@CatchException
def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
history = [] # clear the history to avoid input overflow
import glob, os
def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, system_prompt, web_port, history=None):
# history = []  # clear the history to avoid input overflow
if history is None:
history = []  # clear the history to avoid input overflow
import glob
import os
if os.path.exists(txt):
project_folder = txt
else:
if txt == "": txt = '空空如也的输入栏'
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
if txt == "":
txt = '空空如也的输入栏'
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
# [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
# [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
if len(file_manifest) == 0:
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
return
yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
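The history=None signature above replaces a mutable history=[] default. The pitfall it avoids: a default list is created once at def time and shared across calls, so state leaks between invocations. A minimal demonstration:

def buggy(item, history=[]):       # the default list is created once, at def time
    history.append(item)
    return history

print(buggy('a'))  # ['a']
print(buggy('b'))  # ['a', 'b']  <- leaked state from the previous call

def fixed(item, history=None):
    if history is None:            # a fresh list on every call
        history = []
    history.append(item)
    return history

print(fixed('a'))  # ['a']
print(fixed('b'))  # ['b']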
@@ -13,13 +13,8 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
show_say = txt
prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。'
else:
prompt = history[-1]+"\n分析上述回答,再列出用户可能提出的三个问题。"
show_say = '分析上述回答,再列出用户可能提出的三个问题。'
try:
prompt = history[-1]+f"\n{show_say}"
except IndexError:
prompt = system_prompt+"\n再列出用户可能提出的三个问题。"
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=prompt,
inputs_show_user=show_say,
@@ -28,8 +23,6 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
history=history,
sys_prompt=system_prompt
)
chatbot[-1] = (show_say, gpt_say)
history.extend([show_say, gpt_say])
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
Binary image file not shown (before: 57 KiB).
@@ -1,806 +0,0 @@
:root {
--chatbot-color-light: #000000;
--chatbot-color-dark: #FFFFFF;
--chatbot-background-color-light: #F3F3F3;
--chatbot-background-color-dark: #121111;
--message-user-background-color-light: #95EC69;
--message-user-background-color-dark: #26B561;
--message-bot-background-color-light: #FFFFFF;
--message-bot-background-color-dark: #2C2C2C;
}
mspace {
display: block;
}
@media only screen and (max-width: 767px) {
#column_1 {
display: none !important;
}
}
@keyframes highlight {
0%, 100% {
border: 2px solid transparent;
}
50% {
border-color: yellow;
}
}
#highlight_update {
animation-name: highlight;
animation-duration: 0.75s;
animation-iteration-count: 3;
}
.table-wrap.svelte-13hsdno.svelte-13hsdno.svelte-13hsdno {
border: 0px solid var(--border-color-primary) !important;
}
#examples_col {
z-index: 2;
position: absolute;
bottom: 0;
left: 0;
width: 100%;
margin-bottom: 30% !important;
}
#hide_examples {
z-index: 0;
}
#debug_mes {
position: absolute;
display: flex;
bottom: 0;
left: 0;
z-index: 1; /* use a higher z-index value */
margin-bottom: -4px !important;
align-self: flex-end;
}
#chat_box {
display: flex;
flex-direction: column;
overflow-y: visible !important;
z-index: 3;
flex-grow: 1; /* automatically fill the remaining space */
position: absolute;
bottom: 0;
left: 0;
width: 100%;
margin-bottom: 30px !important;
border: 1px solid var(--border-color-primary);
}
.toast-body {
z-index: 5 !important;
}
.chat_input {
}
.sm_btn {
position: relative;
bottom: 5px;
height: 10%;
border-radius: 20px!important;
min-width: min(10%,100%) !important;
overflow: hidden;
}
.sm_select {
position: relative !important;
z-index: 5 !important;
bottom: 5px;
min-width: min(20%,100%) !important;
border-radius: 20px!important;
}
.sm_checkbox {
position: relative !important;
z-index: 5 !important;
bottom: 5px;
padding: 0 !important;
}
.sm_select .wrap-inner.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e {
padding: 0 !important;
}
.sm_select .block.svelte-mppz8v {
width: 10% !important;
}
/* usage_display */
.insert_block {
position: relative;
bottom: 2px;
min-width: min(55px,100%) !important;
}
.submit_btn {
flex-direction: column-reverse;
overflow-y: auto !important;
position: absolute;
bottom: 0;
right: 10px;
margin-bottom: 10px !important;
min-width: min(50px,100%) !important;
}
textarea {
resize: none;
height: 100%; /* fill the parent element's height */
}
#main_chatbot {
height: 75vh !important;
max-height: 75vh !important;
/* overflow: auto !important; */
z-index: 2;
transform: translateZ(0) !important;
backface-visibility: hidden !important;
will-change: transform !important;
}
#prompt_result{
height: 60vh !important;
max-height: 60vh !important;
}
#app_title {
font-weight: var(--prose-header-text-weight);
font-size: var(--text-xxl);
line-height: 1.3;
text-align: left;
margin-top: 6px;
white-space: nowrap;
}
#description {
text-align: center;
margin: 32px 0 4px 0;
}
/* gradio footer info */
footer {
/* display: none !important; */
margin-top: .2em !important;
font-size: 85%;
}
#footer {
text-align: center;
}
#footer div {
display: inline-block;
}
#footer .versions{
font-size: 85%;
opacity: 0.60;
}
#float_display {
position: absolute;
max-height: 30px;
}
/* user_info */
#user_info {
white-space: nowrap;
position: absolute; left: 8em; top: .2em;
z-index: var(--layer-2);
box-shadow: var(--block-shadow);
border: none; border-radius: var(--block-label-radius);
background: var(--color-accent);
padding: var(--block-label-padding);
font-size: var(--block-label-text-size); line-height: var(--line-sm);
width: auto; min-height: 30px !important;
opacity: 1;
transition: opacity 0.3s ease-in-out;
}
textarea.svelte-1pie7s6 {
background: #e7e6e6 !important;
width: 96% !important;
}
.dark textarea.svelte-1pie7s6 {
background: var(--input-background-fill) !important;
width: 96% !important;
}
.dark input[type=number].svelte-1cl284s {
background: #393939 !important;
border: var(--input-border-width) solid var(--input-border-color) !important;
}
.dark input[type="range"] {
background: #393939 !important;
}
#user_info .wrap {
opacity: 0;
}
#user_info p {
color: white;
font-weight: var(--block-label-text-weight);
}
#user_info.hideK {
opacity: 0;
transition: opacity 1s ease-in-out;
}
[class *= "message"] {
gap: 7px !important;
border-radius: var(--radius-xl) !important
}
/* debug_mes */
#debug_mes {
min-height: 2em;
align-items: flex-end;
justify-content: flex-end;
}
#debug_mes p {
font-size: .85em;
font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace;
/* On Windows, monospace Chinese text falls back to NSimSun, which looks terrible; compromise on Microsoft YaHei */
color: #000000;
}
.dark #debug_mes p {
color: #ee65ed;
}
#debug_mes {
transition: all 0.6s;
}
#main_chatbot {
transition: height 0.3s ease;
}
.wrap.svelte-18telvq.svelte-18telvq {
padding: var(--block-padding) !important;
height: 100% !important;
max-height: 95% !important;
overflow-y: auto !important;
}
.app.svelte-1mya07g.svelte-1mya07g {
max-width: 100%;
position: relative;
/* margin: auto; */
padding: var(--size-4);
width: 100%;
height: 100%;
}
.gradio-container-3-32-2 h1 {
font-weight: 700 !important;
font-size: 28px !important;
}
.gradio-container-3-32-2 h2 {
font-weight: 600 !important;
font-size: 24px !important;
}
.gradio-container-3-32-2 h3 {
font-weight: 500 !important;
font-size: 20px !important;
}
.gradio-container-3-32-2 h4 {
font-weight: 400 !important;
font-size: 16px !important;
}
.gradio-container-3-32-2 h5 {
font-weight: 300 !important;
font-size: 14px !important;
}
.gradio-container-3-32-2 h6 {
font-weight: 200 !important;
font-size: 12px !important;
}
#usage_display p, #usage_display span {
margin: 0;
font-size: .85em;
color: var(--body-text-color-subdued);
}
.progress-bar {
background-color: var(--input-background-fill);
margin: .5em 0 !important;
height: 20px;
border-radius: 10px;
overflow: hidden;
}
.progress {
background-color: var(--block-title-background-fill);
height: 100%;
border-radius: 10px;
text-align: right;
transition: width 0.5s ease-in-out;
}
.progress-text {
/* color: white; */
color: var(--color-accent) !important;
font-size: 1em !important;
font-weight: bold;
padding-right: 10px;
line-height: 20px;
}
.apSwitch {
top: 2px;
display: inline-block;
height: 24px;
position: relative;
width: 48px;
border-radius: 12px;
}
.apSwitch input {
display: none !important;
}
.apSlider {
background-color: var(--neutral-200);
bottom: 0;
cursor: pointer;
left: 0;
position: absolute;
right: 0;
top: 0;
transition: .4s;
font-size: 18px;
border-radius: 7px;
}
.apSlider::before {
bottom: -1.5px;
left: 1px;
position: absolute;
transition: .4s;
content: "🌞";
}
hr.append-display {
margin: 8px 0;
border: none;
height: 1px;
border-top-width: 0;
background-image: linear-gradient(to right, rgba(50,50,50, 0.1), rgba(150, 150, 150, 0.8), rgba(50,50,50, 0.1));
}
.source-a {
font-size: 0.8em;
max-width: 100%;
margin: 0;
display: flex;
flex-direction: row;
flex-wrap: wrap;
align-items: center;
/* background-color: #dddddd88; */
border-radius: 1.5rem;
padding: 0.2em;
}
.source-a a {
display: inline-block;
background-color: #aaaaaa50;
border-radius: 1rem;
padding: 0.5em;
text-align: center;
text-overflow: ellipsis;
overflow: hidden;
min-width: 20%;
white-space: nowrap;
margin: 0.2rem 0.1rem;
text-decoration: none !important;
flex: 1;
transition: flex 0.5s;
}
.source-a a:hover {
background-color: #aaaaaa20;
flex: 2;
}
input:checked + .apSlider {
background-color: var(--primary-600);
}
input:checked + .apSlider::before {
transform: translateX(23px);
content:"🌚";
}
/* Override Slider Styles (for webkit browsers like Safari and Chrome)
* really hoping this proposal lands soon: https://github.com/w3c/csswg-drafts/issues/4410
* range sliders are still far too inconsistent across platforms
*/
input[type="range"] {
-webkit-appearance: none;
height: 4px;
background: var(--input-background-fill);
border-radius: 5px;
background-image: linear-gradient(var(--primary-500),var(--primary-500));
background-size: 0% 100%;
background-repeat: no-repeat;
}
input[type="range"]::-webkit-slider-thumb {
-webkit-appearance: none;
height: 20px;
width: 20px;
border-radius: 50%;
border: solid 0.5px #ddd;
background-color: white;
cursor: ew-resize;
box-shadow: var(--input-shadow);
transition: background-color .1s ease;
}
input[type="range"]::-webkit-slider-thumb:hover {
background: var(--neutral-50);
}
input[type=range]::-webkit-slider-runnable-track {
-webkit-appearance: none;
box-shadow: none;
border: none;
background: transparent;
}
.submit_btn, #cancel_btn {
height: 42px !important;
}
.submit_btn::before {
content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E");
height: 21px;
}
#cancel_btn::before {
content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E");
height: 21px;
}
/* list */
ol:not(.options), ul:not(.options) {
padding-inline-start: 2em !important;
}
/* light theme (default) */
#main_chatbot {
background-color: var(--chatbot-background-color-light) !important;
color: var(--chatbot-color-light) !important;
}
/* dark theme */
.dark #main_chatbot {
background-color: var(--block-background-fill) !important;
color: var(--chatbot-color-dark) !important;
}
/* devices with screen width >= 500px */
/* update on 2023.4.8: fine-grained height adjustment has moved into JavaScript */
@media screen and (min-width: 500px) {
#main_chatbot {
height: calc(100vh - 200px);
}
#main_chatbot .wrap {
max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
}
}
/* devices with screen width < 500px */
@media screen and (max-width: 499px) {
#main_chatbot {
height: calc(100vh - 140px);
}
#main_chatbot .wrap {
max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
}
[data-testid = "bot"] {
max-width: 95% !important;
}
#app_title h1{
letter-spacing: -1px; font-size: 22px;
}
}
#main_chatbot .wrap {
overflow-x: hidden
}
/* chat bubbles */
.message {
border-radius: var(--radius-xl) !important;
border: none;
padding: var(--spacing-xl) !important;
font-size: 15px !important;
line-height: var(--line-md) !important;
min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
}
[data-testid = "bot"] {
max-width: 85%;
border-bottom-left-radius: 0 !important;
}
[data-testid = "user"] {
max-width: 85%;
width: auto !important;
border-bottom-right-radius: 0 !important;
}
.message p {
margin-top: 0.6em !important;
margin-bottom: 0.6em !important;
}
.message p:first-child { margin-top: 0 !important; }
.message p:last-of-type { margin-bottom: 0 !important; }
.message .md-message {
display: block;
padding: 0 !important;
}
.message .raw-message {
display: block;
padding: 0 !important;
white-space: pre-wrap;
}
.raw-message.hideM, .md-message.hideM {
display: none;
}
/* custom buttons */
.chuanhu-btn {
border-radius: 5px;
/* background-color: #E6E6E6 !important; */
color: rgba(120, 120, 120, 0.64) !important;
padding: 4px !important;
position: absolute;
right: -22px;
cursor: pointer !important;
transition: color .2s ease, background-color .2s ease;
}
.chuanhu-btn:hover {
background-color: rgba(167, 167, 167, 0.25) !important;
color: unset !important;
}
.chuanhu-btn:active {
background-color: rgba(167, 167, 167, 0.5) !important;
}
.chuanhu-btn:focus {
outline: none;
}
.copy-bot-btn {
/* top: 18px; */
bottom: 0;
}
.toggle-md-btn {
/* top: 0; */
bottom: 20px;
}
.copy-code-btn {
position: relative;
float: right;
font-size: 1em;
cursor: pointer;
}
.message-wrap>div img{
border-radius: 10px !important;
}
/* history message */
.wrap>.history-message {
padding: 10px !important;
}
.history-message {
/* padding: 0 !important; */
opacity: 80%;
display: flex;
flex-direction: column;
}
.history-message>.history-message {
padding: 0 !important;
}
.history-message>.message-wrap {
padding: 0 !important;
margin-bottom: 16px;
}
.history-message>.message {
margin-bottom: 16px;
}
.wrap>.history-message::after {
content: "";
display: block;
height: 2px;
background-color: var(--body-text-color-subdued);
margin-bottom: 10px;
margin-top: -10px;
clear: both;
}
.wrap>.history-message>:last-child::after {
content: "仅供查看";
display: block;
text-align: center;
color: var(--body-text-color-subdued);
font-size: 0.8em;
}
/* tables */
table {
margin: 1em 0;
border-collapse: collapse;
empty-cells: show;
}
td,th {
border: 1.2px solid var(--border-color-primary) !important;
padding: 0.2em;
}
thead {
background-color: rgba(175,184,193,0.2);
}
thead th {
padding: .5em .2em;
}
/* inline code */
.message :not(pre) code {
display: inline;
white-space: break-spaces;
border-radius: 6px;
margin: 0 2px 0 2px;
padding: .2em .4em .1em .4em;
background-color: rgba(175,184,193,0.2);
}
/* code blocks */
.message pre code {
display: block;
overflow: auto;
white-space: pre;
background-color: hsla(0, 0%, 7%, 70%)!important;
border-radius: 10px;
padding: 1.2em 1em 0em .5em;
margin: 0.6em 2em 1em 0.2em;
color: #FFF;
box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
}
.dark .message pre code {
background-color: hsla(0, 0%, 20%, 300%)!important;
}
.message pre {
padding: 0 !important;
}
.message pre code div.highlight {
background-color: unset !important;
}
button.copy-button {
display: none;
}
/* code highlighting styles */
.codehilite .hll { background-color: #6e7681 }
.codehilite .c { color: #8b949e; font-style: italic } /* Comment */
.codehilite .err { color: #f85149 } /* Error */
.codehilite .esc { color: #c9d1d9 } /* Escape */
.codehilite .g { color: #c9d1d9 } /* Generic */
.codehilite .k { color: #ff7b72 } /* Keyword */
.codehilite .l { color: #a5d6ff } /* Literal */
.codehilite .n { color: #c9d1d9 } /* Name */
.codehilite .o { color: #ff7b72; font-weight: bold } /* Operator */
.codehilite .x { color: #c9d1d9 } /* Other */
.codehilite .p { color: #c9d1d9 } /* Punctuation */
.codehilite .ch { color: #8b949e; font-style: italic } /* Comment.Hashbang */
.codehilite .cm { color: #8b949e; font-style: italic } /* Comment.Multiline */
.codehilite .cp { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Preproc */
.codehilite .cpf { color: #8b949e; font-style: italic } /* Comment.PreprocFile */
.codehilite .c1 { color: #8b949e; font-style: italic } /* Comment.Single */
.codehilite .cs { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Special */
.codehilite .gd { color: #ffa198; background-color: #490202 } /* Generic.Deleted */
.codehilite .ge { color: #c9d1d9; font-style: italic } /* Generic.Emph */
.codehilite .gr { color: #ffa198 } /* Generic.Error */
.codehilite .gh { color: #79c0ff; font-weight: bold } /* Generic.Heading */
.codehilite .gi { color: #56d364; background-color: #0f5323 } /* Generic.Inserted */
.codehilite .go { color: #8b949e } /* Generic.Output */
.codehilite .gp { color: #8b949e } /* Generic.Prompt */
.codehilite .gs { color: #c9d1d9; font-weight: bold } /* Generic.Strong */
.codehilite .gu { color: #79c0ff } /* Generic.Subheading */
.codehilite .gt { color: #ff7b72 } /* Generic.Traceback */
.codehilite .g-Underline { color: #c9d1d9; text-decoration: underline } /* Generic.Underline */
.codehilite .kc { color: #79c0ff } /* Keyword.Constant */
.codehilite .kd { color: #ff7b72 } /* Keyword.Declaration */
.codehilite .kn { color: #ff7b72 } /* Keyword.Namespace */
.codehilite .kp { color: #79c0ff } /* Keyword.Pseudo */
.codehilite .kr { color: #ff7b72 } /* Keyword.Reserved */
.codehilite .kt { color: #ff7b72 } /* Keyword.Type */
.codehilite .ld { color: #79c0ff } /* Literal.Date */
.codehilite .m { color: #a5d6ff } /* Literal.Number */
.codehilite .s { color: #a5d6ff } /* Literal.String */
.codehilite .na { color: #c9d1d9 } /* Name.Attribute */
.codehilite .nb { color: #c9d1d9 } /* Name.Builtin */
.codehilite .nc { color: #f0883e; font-weight: bold } /* Name.Class */
.codehilite .no { color: #79c0ff; font-weight: bold } /* Name.Constant */
.codehilite .nd { color: #d2a8ff; font-weight: bold } /* Name.Decorator */
.codehilite .ni { color: #ffa657 } /* Name.Entity */
.codehilite .ne { color: #f0883e; font-weight: bold } /* Name.Exception */
.codehilite .nf { color: #d2a8ff; font-weight: bold } /* Name.Function */
.codehilite .nl { color: #79c0ff; font-weight: bold } /* Name.Label */
.codehilite .nn { color: #ff7b72 } /* Name.Namespace */
.codehilite .nx { color: #c9d1d9 } /* Name.Other */
.codehilite .py { color: #79c0ff } /* Name.Property */
.codehilite .nt { color: #7ee787 } /* Name.Tag */
.codehilite .nv { color: #79c0ff } /* Name.Variable */
.codehilite .ow { color: #ff7b72; font-weight: bold } /* Operator.Word */
.codehilite .pm { color: #c9d1d9 } /* Punctuation.Marker */
.codehilite .w { color: #6e7681 } /* Text.Whitespace */
.codehilite .mb { color: #a5d6ff } /* Literal.Number.Bin */
.codehilite .mf { color: #a5d6ff } /* Literal.Number.Float */
.codehilite .mh { color: #a5d6ff } /* Literal.Number.Hex */
.codehilite .mi { color: #a5d6ff } /* Literal.Number.Integer */
.codehilite .mo { color: #a5d6ff } /* Literal.Number.Oct */
.codehilite .sa { color: #79c0ff } /* Literal.String.Affix */
.codehilite .sb { color: #a5d6ff } /* Literal.String.Backtick */
.codehilite .sc { color: #a5d6ff } /* Literal.String.Char */
.codehilite .dl { color: #79c0ff } /* Literal.String.Delimiter */
.codehilite .sd { color: #a5d6ff } /* Literal.String.Doc */
.codehilite .s2 { color: #a5d6ff } /* Literal.String.Double */
.codehilite .se { color: #79c0ff } /* Literal.String.Escape */
.codehilite .sh { color: #79c0ff } /* Literal.String.Heredoc */
.codehilite .si { color: #a5d6ff } /* Literal.String.Interpol */
.codehilite .sx { color: #a5d6ff } /* Literal.String.Other */
.codehilite .sr { color: #79c0ff } /* Literal.String.Regex */
.codehilite .s1 { color: #a5d6ff } /* Literal.String.Single */
.codehilite .ss { color: #a5d6ff } /* Literal.String.Symbol */
.codehilite .bp { color: #c9d1d9 } /* Name.Builtin.Pseudo */
.codehilite .fm { color: #d2a8ff; font-weight: bold } /* Name.Function.Magic */
.codehilite .vc { color: #79c0ff } /* Name.Variable.Class */
.codehilite .vg { color: #79c0ff } /* Name.Variable.Global */
.codehilite .vi { color: #79c0ff } /* Name.Variable.Instance */
.codehilite .vm { color: #79c0ff } /* Name.Variable.Magic */
.codehilite .il { color: #a5d6ff } /* Literal.Number.Integer.Long */
.dark .codehilite .hll { background-color: #2C3B41 }
.dark .codehilite .c { color: #79d618; font-style: italic } /* Comment */
.dark .codehilite .err { color: #FF5370 } /* Error */
.dark .codehilite .esc { color: #89DDFF } /* Escape */
.dark .codehilite .g { color: #EEFFFF } /* Generic */
.dark .codehilite .k { color: #BB80B3 } /* Keyword */
.dark .codehilite .l { color: #C3E88D } /* Literal */
.dark .codehilite .n { color: #EEFFFF } /* Name */
.dark .codehilite .o { color: #89DDFF } /* Operator */
.dark .codehilite .p { color: #89DDFF } /* Punctuation */
.dark .codehilite .ch { color: #79d618; font-style: italic } /* Comment.Hashbang */
.dark .codehilite .cm { color: #79d618; font-style: italic } /* Comment.Multiline */
.dark .codehilite .cp { color: #79d618; font-style: italic } /* Comment.Preproc */
.dark .codehilite .cpf { color: #79d618; font-style: italic } /* Comment.PreprocFile */
.dark .codehilite .c1 { color: #79d618; font-style: italic } /* Comment.Single */
.dark .codehilite .cs { color: #79d618; font-style: italic } /* Comment.Special */
.dark .codehilite .gd { color: #FF5370 } /* Generic.Deleted */
.dark .codehilite .ge { color: #89DDFF } /* Generic.Emph */
.dark .codehilite .gr { color: #FF5370 } /* Generic.Error */
.dark .codehilite .gh { color: #C3E88D } /* Generic.Heading */
.dark .codehilite .gi { color: #C3E88D } /* Generic.Inserted */
.dark .codehilite .go { color: #79d618 } /* Generic.Output */
.dark .codehilite .gp { color: #FFCB6B } /* Generic.Prompt */
.dark .codehilite .gs { color: #FF5370 } /* Generic.Strong */
.dark .codehilite .gu { color: #89DDFF } /* Generic.Subheading */
.dark .codehilite .gt { color: #FF5370 } /* Generic.Traceback */
.dark .codehilite .kc { color: #89DDFF } /* Keyword.Constant */
.dark .codehilite .kd { color: #BB80B3 } /* Keyword.Declaration */
.dark .codehilite .kn { color: #89DDFF; font-style: italic } /* Keyword.Namespace */
.dark .codehilite .kp { color: #89DDFF } /* Keyword.Pseudo */
.dark .codehilite .kr { color: #BB80B3 } /* Keyword.Reserved */
.dark .codehilite .kt { color: #BB80B3 } /* Keyword.Type */
.dark .codehilite .ld { color: #C3E88D } /* Literal.Date */
.dark .codehilite .m { color: #F78C6C } /* Literal.Number */
.dark .codehilite .s { color: #C3E88D } /* Literal.String */
.dark .codehilite .na { color: #BB80B3 } /* Name.Attribute */
.dark .codehilite .nb { color: #82AAFF } /* Name.Builtin */
.dark .codehilite .nc { color: #FFCB6B } /* Name.Class */
.dark .codehilite .no { color: #EEFFFF } /* Name.Constant */
.dark .codehilite .nd { color: #82AAFF } /* Name.Decorator */
.dark .codehilite .ni { color: #89DDFF } /* Name.Entity */
.dark .codehilite .ne { color: #FFCB6B } /* Name.Exception */
.dark .codehilite .nf { color: #82AAFF } /* Name.Function */
.dark .codehilite .nl { color: #82AAFF } /* Name.Label */
.dark .codehilite .nn { color: #FFCB6B } /* Name.Namespace */
.dark .codehilite .nx { color: #EEFFFF } /* Name.Other */
.dark .codehilite .py { color: #FFCB6B } /* Name.Property */
.dark .codehilite .nt { color: #FF5370 } /* Name.Tag */
.dark .codehilite .nv { color: #89DDFF } /* Name.Variable */
.dark .codehilite .ow { color: #89DDFF; font-style: italic } /* Operator.Word */
.dark .codehilite .pm { color: #89DDFF } /* Punctuation.Marker */
.dark .codehilite .w { color: #EEFFFF } /* Text.Whitespace */
.dark .codehilite .mb { color: #F78C6C } /* Literal.Number.Bin */
.dark .codehilite .mf { color: #F78C6C } /* Literal.Number.Float */
.dark .codehilite .mh { color: #F78C6C } /* Literal.Number.Hex */
.dark .codehilite .mi { color: #F78C6C } /* Literal.Number.Integer */
.dark .codehilite .mo { color: #F78C6C } /* Literal.Number.Oct */
.dark .codehilite .sa { color: #BB80B3 } /* Literal.String.Affix */
.dark .codehilite .sb { color: #C3E88D } /* Literal.String.Backtick */
.dark .codehilite .sc { color: #C3E88D } /* Literal.String.Char */
.dark .codehilite .dl { color: #EEFFFF } /* Literal.String.Delimiter */
.dark .codehilite .sd { color: #79d618; font-style: italic } /* Literal.String.Doc */
.dark .codehilite .s2 { color: #C3E88D } /* Literal.String.Double */
.dark .codehilite .se { color: #EEFFFF } /* Literal.String.Escape */
.dark .codehilite .sh { color: #C3E88D } /* Literal.String.Heredoc */
.dark .codehilite .si { color: #89DDFF } /* Literal.String.Interpol */
.dark .codehilite .sx { color: #C3E88D } /* Literal.String.Other */
.dark .codehilite .sr { color: #89DDFF } /* Literal.String.Regex */
.dark .codehilite .s1 { color: #C3E88D } /* Literal.String.Single */
.dark .codehilite .ss { color: #89DDFF } /* Literal.String.Symbol */
.dark .codehilite .bp { color: #89DDFF } /* Name.Builtin.Pseudo */
.dark .codehilite .fm { color: #82AAFF } /* Name.Function.Magic */
.dark .codehilite .vc { color: #89DDFF } /* Name.Variable.Class */
.dark .codehilite .vg { color: #89DDFF } /* Name.Variable.Global */
.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */
@@ -1,465 +0,0 @@
// custom javascript here
const MAX_HISTORY_LENGTH = 32;
var key_down_history = [];
var currentIndex = -1;
var user_input_ta;
var gradioContainer = null;
var user_input_ta = null;
var chat_txt = null;
var userInfoDiv = null;
var appTitleDiv = null;
var chatbot = null;
var chatbotWrap = null;
var apSwitch = null;
var messageBotDivs = null;
var loginUserForm = null;
var logginUser = null;
var userLogged = false;
var usernameGotten = false;
var historyLoaded = false;
var ga = document.getElementsByTagName("gradio-app");
var targetNode = ga[0];
var isInIframe = (window.self !== window.top);
var language = navigator.language.slice(0,2);
var forView_i18n = {
'zh': "仅供查看",
'en': "For viewing only",
'ja': "閲覧専用",
'fr': "Pour consultation seulement",
'es': "Solo para visualización",
};
var deleteConfirm_i18n_pref = {
'zh': "你真的要删除 ",
'en': "Are you sure you want to delete ",
'ja': "本当に ",
};
var deleteConfirm_i18n_suff = {
'zh': " 吗?",
'en': " ?",
'ja': " を削除してもよろしいですか?",
};
var deleteConfirm_msg_pref = "Are you sure you want to delete ";
var deleteConfirm_msg_suff = " ?";
// has the gradio page finished loading??? can I touch your elements yet??
function gradioLoaded(mutations) {
for (var i = 0; i < mutations.length; i++) {
if (mutations[i].addedNodes.length) {
loginUserForm = document.querySelector(".gradio-container > .main > .wrap > .panel > .form")
gradioContainer = document.querySelector(".gradio-container");
chat_txt = document.getElementById('chat_txt');
userInfoDiv = document.getElementById("user_info");
appTitleDiv = document.getElementById("app_title");
chatbot = document.querySelector('#废弃');
chatbotWrap = document.querySelector('#废弃 > .wrap');
apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
if (loginUserForm) {
localStorage.setItem("userLogged", true);
userLogged = true;
}
if (gradioContainer && apSwitch) { // gradioCainter 加载出来了没?
adjustDarkMode();
}
if (chat_txt) { // has chat_txt loaded yet?
selectHistory();
}
if (userInfoDiv && appTitleDiv) { // userInfoDiv 和 appTitleDiv 加载出来了没?
if (!usernameGotten) {
getUserInfo();
}
setTimeout(showOrHideUserInfo(), 2000);
}
if (chatbot) { // has chatbot loaded yet?
setChatbotHeight();
}
if (chatbotWrap) {
if (!historyLoaded) {
loadHistoryHtml();
}
setChatbotScroll();
}
}
}
}
function webLocale() {
// console.log("webLocale", language);
if (forView_i18n.hasOwnProperty(language)) {
var forView = forView_i18n[language];
var forViewStyle = document.createElement('style');
forViewStyle.innerHTML = '.wrap>.history-message>:last-child::after { content: "' + forView + '"!important; }';
document.head.appendChild(forViewStyle);
}
if (deleteConfirm_i18n_pref.hasOwnProperty(language)) {
deleteConfirm_msg_pref = deleteConfirm_i18n_pref[language];
deleteConfirm_msg_suff = deleteConfirm_i18n_suff[language];
}
}
function showConfirmationDialog(a, file, c) {
if (file != "") {
var result = confirm(deleteConfirm_msg_pref + file + deleteConfirm_msg_suff);
if (result) {
return [a, file, c];
}
}
return [a, "CANCELED", c];
}
function selectHistory() {
user_input_ta = chat_txt.querySelector("textarea");
if (user_input_ta) {
observer.disconnect(); // stop observing
// listen for keydown events on the textarea
user_input_ta.addEventListener("keydown", function (event) {
var value = user_input_ta.value.trim();
// check whether an arrow key was pressed
if (event.code === 'ArrowUp' || event.code === 'ArrowDown') {
// if an arrow key was pressed while the input box holds content that is not in the history, do nothing
if (value && key_down_history.indexOf(value) === -1)
return;
// for actions we do handle, prevent the default behavior
event.preventDefault();
var length = key_down_history.length;
if (length === 0) {
currentIndex = -1; // if the history is empty, just reset the current selection
return;
}
if (currentIndex === -1) {
currentIndex = length;
}
if (event.code === 'ArrowUp' && currentIndex > 0) {
currentIndex--;
user_input_ta.value = key_down_history[currentIndex];
} else if (event.code === 'ArrowDown' && currentIndex < length - 1) {
currentIndex++;
user_input_ta.value = key_down_history[currentIndex];
}
user_input_ta.selectionStart = user_input_ta.value.length;
user_input_ta.selectionEnd = user_input_ta.value.length;
const input_event = new InputEvent("input", { bubbles: true, cancelable: true });
user_input_ta.dispatchEvent(input_event);
} else if (event.code === "Enter") {
if (value) {
currentIndex = -1;
if (key_down_history.indexOf(value) === -1) {
key_down_history.push(value);
if (key_down_history.length > MAX_HISTORY_LENGTH) {
key_down_history.shift();
}
}
}
}
});
}
}
var username = null;
function getUserInfo() {
if (usernameGotten) {
return;
}
userLogged = localStorage.getItem('userLogged');
if (userLogged) {
username = userInfoDiv.innerText;
if (username) {
if (username.includes("getting user info…")) {
setTimeout(getUserInfo, 500);
return;
} else if (username === " ") {
localStorage.removeItem("username");
localStorage.removeItem("userLogged")
userLogged = false;
usernameGotten = true;
return;
} else {
username = username.match(/User:\s*(.*)/)[1] || username;
localStorage.setItem("username", username);
usernameGotten = true;
clearHistoryHtml();
}
}
}
}
function toggleUserInfoVisibility(shouldHide) {
if (userInfoDiv) {
if (shouldHide) {
userInfoDiv.classList.add("hideK");
} else {
userInfoDiv.classList.remove("hideK");
}
}
}
function showOrHideUserInfo() {
var sendBtn = document.getElementById("submit_btn");
// Bind mouse/touch events to show/hide user info
appTitleDiv.addEventListener("mouseenter", function () {
toggleUserInfoVisibility(false);
});
userInfoDiv.addEventListener("mouseenter", function () {
toggleUserInfoVisibility(false);
});
sendBtn.addEventListener("mouseenter", function () {
toggleUserInfoVisibility(false);
});
appTitleDiv.addEventListener("mouseleave", function () {
toggleUserInfoVisibility(true);
});
userInfoDiv.addEventListener("mouseleave", function () {
toggleUserInfoVisibility(true);
});
sendBtn.addEventListener("mouseleave", function () {
toggleUserInfoVisibility(true);
});
appTitleDiv.ontouchstart = function () {
toggleUserInfoVisibility(false);
};
userInfoDiv.ontouchstart = function () {
toggleUserInfoVisibility(false);
};
sendBtn.ontouchstart = function () {
toggleUserInfoVisibility(false);
};
appTitleDiv.ontouchend = function () {
setTimeout(function () {
toggleUserInfoVisibility(true);
}, 3000);
};
userInfoDiv.ontouchend = function () {
setTimeout(function () {
toggleUserInfoVisibility(true);
}, 3000);
};
sendBtn.ontouchend = function () {
setTimeout(function () {
toggleUserInfoVisibility(true);
}, 3000); // delay 3 seconds before hiding user info
};
// Hide user info after 2 seconds
setTimeout(function () {
toggleUserInfoVisibility(true);
}, 2000);
}
function toggleDarkMode(isEnabled) {
if (isEnabled) {
document.body.classList.add("dark");
document.body.style.setProperty("background-color", "var(--neutral-950)", "important");
} else {
document.body.classList.remove("dark");
document.body.style.backgroundColor = "";
}
}
function adjustDarkMode() {
const darkModeQuery = window.matchMedia("(prefers-color-scheme: dark)");
// set the initial state according to the current color scheme
apSwitch.checked = darkModeQuery.matches;
toggleDarkMode(darkModeQuery.matches);
// listen for color-scheme changes
darkModeQuery.addEventListener("change", (e) => {
apSwitch.checked = e.matches;
toggleDarkMode(e.matches);
});
// apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
apSwitch.addEventListener("change", (e) => {
toggleDarkMode(e.target.checked);
});
}
function setChatbotHeight() {
const screenWidth = window.innerWidth;
const statusDisplay = document.querySelector('#status_display');
const statusDisplayHeight = statusDisplay ? statusDisplay.offsetHeight : 0;
const wrap = chatbot.querySelector('.wrap');
const vh = window.innerHeight * 0.01;
document.documentElement.style.setProperty('--vh', `${vh}px`);
if (isInIframe) {
chatbot.style.height = `700px`;
wrap.style.maxHeight = `calc(700px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`
} else {
if (screenWidth <= 320) {
chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px)`;
wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
} else if (screenWidth <= 499) {
chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px)`;
wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
} else {
chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px)`;
wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
}
}
}
function setChatbotScroll() {
var scrollHeight = chatbotWrap.scrollHeight;
chatbotWrap.scrollTo(0,scrollHeight)
}
var rangeInputs = null;
var numberInputs = null;
function setSlider() {
rangeInputs = document.querySelectorAll('input[type="range"]');
numberInputs = document.querySelectorAll('input[type="number"]')
setSliderRange();
rangeInputs.forEach(rangeInput => {
rangeInput.addEventListener('input', setSliderRange);
});
numberInputs.forEach(numberInput => {
numberInput.addEventListener('input', setSliderRange);
})
}
function setSliderRange() {
var range = document.querySelectorAll('input[type="range"]');
range.forEach(range => {
range.style.backgroundSize = (range.value - range.min) / (range.max - range.min) * 100 + '% 100%';
});
}
function addChuanhuButton(botElement) {
var rawMessage = null;
var mdMessage = null;
rawMessage = botElement.querySelector('.raw-message');
mdMessage = botElement.querySelector('.md-message');
if (!rawMessage) {
var buttons = botElement.querySelectorAll('button.chuanhu-btn');
for (var i = 0; i < buttons.length; i++) {
buttons[i].parentNode.removeChild(buttons[i]);
}
return;
}
var copyButton = null;
var toggleButton = null;
copyButton = botElement.querySelector('button.copy-bot-btn');
toggleButton = botElement.querySelector('button.toggle-md-btn');
if (copyButton) copyButton.remove();
if (toggleButton) toggleButton.remove();
// Copy bot button
var copyButton = document.createElement('button');
copyButton.classList.add('chuanhu-btn');
copyButton.classList.add('copy-bot-btn');
copyButton.setAttribute('aria-label', 'Copy');
copyButton.innerHTML = copyIcon;
copyButton.addEventListener('click', () => {
const textToCopy = rawMessage.innerText;
navigator.clipboard
.writeText(textToCopy)
.then(() => {
copyButton.innerHTML = copiedIcon;
setTimeout(() => {
copyButton.innerHTML = copyIcon;
}, 1500);
})
.catch(() => {
console.error("copy failed");
});
});
botElement.appendChild(copyButton);
// Toggle button
var toggleButton = document.createElement('button');
toggleButton.classList.add('chuanhu-btn');
toggleButton.classList.add('toggle-md-btn');
toggleButton.setAttribute('aria-label', 'Toggle');
var renderMarkdown = mdMessage.classList.contains('hideM');
toggleButton.innerHTML = renderMarkdown ? mdIcon : rawIcon;
toggleButton.addEventListener('click', () => {
renderMarkdown = mdMessage.classList.contains('hideM');
if (renderMarkdown){
renderMarkdownText(botElement);
toggleButton.innerHTML=rawIcon;
} else {
removeMarkdownText(botElement);
toggleButton.innerHTML=mdIcon;
}
});
botElement.insertBefore(toggleButton, copyButton);
}
function renderMarkdownText(message) {
var mdDiv = message.querySelector('.md-message');
if (mdDiv) mdDiv.classList.remove('hideM');
var rawDiv = message.querySelector('.raw-message');
if (rawDiv) rawDiv.classList.add('hideM');
}
function removeMarkdownText(message) {
var rawDiv = message.querySelector('.raw-message');
if (rawDiv) rawDiv.classList.remove('hideM');
var mdDiv = message.querySelector('.md-message');
if (mdDiv) mdDiv.classList.add('hideM');
}
let timeoutId;
let isThrottled = false;
var mmutation
// Watch every element for changes to bot messages, and add copy buttons to bot messages.
var mObserver = new MutationObserver(function (mutationsList) {
for (mmutation of mutationsList) {
if (mmutation.type === 'childList') {
for (var node of mmutation.addedNodes) {
if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') {
saveHistoryHtml();
document.querySelectorAll('#废弃>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
}
if (node.tagName === 'INPUT' && node.getAttribute('type') === 'range') {
setSlider();
}
}
for (var node of mmutation.removedNodes) {
if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') {
saveHistoryHtml();
document.querySelectorAll('#废弃>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
}
}
} else if (mmutation.type === 'attributes') {
if (mmutation.target.nodeType === 1 && mmutation.target.classList.contains('message') && mmutation.target.getAttribute('data-testid') === 'bot') {
if (isThrottled) break; // throttle so repeated mutations don't trigger frantic re-rendering _(:з」∠)_
isThrottled = true;
clearTimeout(timeoutId);
timeoutId = setTimeout(() => {
isThrottled = false;
document.querySelectorAll('#废弃>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
saveHistoryHtml();
}, 500);
}
}
}
});
mObserver.observe(document.documentElement, { attributes: true, childList: true, subtree: true });
// watch for DOM changes inside the page
var observer = new MutationObserver(function (mutations) {
gradioLoaded(mutations);
});
observer.observe(targetNode, { childList: true, subtree: true });
// watch for page-level changes
window.addEventListener("DOMContentLoaded", function () {
isInIframe = (window.self !== window.top);
historyLoaded = false;
});
window.addEventListener('resize', setChatbotHeight);
window.addEventListener('scroll', setChatbotHeight);
window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode);
// button svg code
const copyIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
const copiedIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
const mdIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="1" viewBox="0 0 14 18" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><g transform-origin="center" transform="scale(0.85)"><path d="M1.5,0 L12.5,0 C13.3284271,-1.52179594e-16 14,0.671572875 14,1.5 L14,16.5 C14,17.3284271 13.3284271,18 12.5,18 L1.5,18 C0.671572875,18 1.01453063e-16,17.3284271 0,16.5 L0,1.5 C-1.01453063e-16,0.671572875 0.671572875,1.52179594e-16 1.5,0 Z" stroke-width="1.8"></path><line x1="3.5" y1="3.5" x2="10.5" y2="3.5"></line><line x1="3.5" y1="6.5" x2="8" y2="6.5"></line></g><path d="M4,9 L10,9 C10.5522847,9 11,9.44771525 11,10 L11,13.5 C11,14.0522847 10.5522847,14.5 10,14.5 L4,14.5 C3.44771525,14.5 3,14.0522847 3,13.5 L3,10 C3,9.44771525 3.44771525,9 4,9 Z" stroke="none" fill="currentColor"></path></svg></span>';
const rawIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="1.8" viewBox="0 0 18 14" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><g transform-origin="center" transform="scale(0.85)"><polyline points="4 3 0 7 4 11"></polyline><polyline points="14 3 18 7 14 11"></polyline><line x1="12" y1="0" x2="6" y2="14"></line></g></svg></span>';
@@ -1,2 +0,0 @@
// external javascript here
@@ -1,8 +0,0 @@
<div style="display: flex; justify-content: space-between;">
<span>
<label class="apSwitch" for="checkbox">
<input type="checkbox" id="checkbox">
<div class="apSlider"></div>
</label>
</span>
</div>
@@ -1,9 +0,0 @@
<b>{label}</b>
<div class="progress-bar">
<div class="progress" style="width: {usage_percent}%;">
<span class="progress-text">{usage_percent}%</span>
</div>
</div>
<div style="display: flex; justify-content: space-between;">
<span>${rounded_usage}</span><span>${usage_limit}</span>
</div>
@@ -1 +0,0 @@
<div class="versions">{versions}</div>
Binary image file not shown (before: 18 KiB).
@@ -49,7 +49,7 @@ def markdown_convertion(txt):
"""
Convert Markdown-formatted text to HTML. If it contains math formulas, convert the formulas to HTML first.
"""
pre = '<div class="md-message">'
pre = '<div class="markdown-body">'
suf = '</div>'
if txt.startswith(pre) and txt.endswith(suf):
# print('Warning: the input string has already been converted once; converting it again may cause problems')
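A condensed sketch of the guard above: input already wrapped in the output div is returned untouched instead of being converted a second time (the markdown package call is an assumption for illustration, standing in for the converter the function actually uses):

import markdown  # assumed converter for this sketch

def convert_once(txt):
    pre, suf = '<div class="markdown-body">', '</div>'
    if txt.startswith(pre) and txt.endswith(suf):
        return txt  # already converted; a second pass could mangle the HTML
    return pre + markdown.markdown(txt) + suf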
@@ -265,7 +265,7 @@
"例如chatglm&gpt-3.5-turbo&api2d-gpt-4": "e.g. chatglm&gpt-3.5-turbo&api2d-gpt-4",
"先切换模型到openai或api2d": "Switch the model to openai or api2d first",
"在这里输入分辨率": "Enter the resolution here",
"如'256x256', '512x512', '1024x1024'": "e.g. '256x256', '512x512', '1024x1024'",
"如256x256": "e.g. 256x256",
"默认": "Default",
"建议您复制一个config_private.py放自己的秘密": "We suggest you to copy a config_private.py file to keep your secrets, such as API and proxy URLs, from being accidentally uploaded to Github and seen by others.",
"如API和代理网址": "Such as API and proxy URLs",
@@ -12,7 +12,7 @@ try {
live2d_settings['waifuTipsSize'] = '187x52';
live2d_settings['canSwitchModel'] = true;
live2d_settings['canSwitchTextures'] = true;
live2d_settings['canSwitchHitokoto'] = true;
live2d_settings['canSwitchHitokoto'] = false;
live2d_settings['canTakeScreenshot'] = false;
live2d_settings['canTurnToHomePage'] = false;
live2d_settings['canTurnToAboutPage'] = false;
@@ -34,10 +34,10 @@
"2": ["来自 Potion Maker 的 Tia 酱 ~"]
},
"hitokoto_api_message": {
"lwl12.com": ["这句一言来自 <span style=\"color:#ff99da;\">『{source}』</span>", ",是 <span style=\"color:#ff99da;\">{creator}</span> 投稿的", "。"],
"fghrsh.net": ["这句一言出处是 <span style=\"color:#ff99da;\">『{source}』</span>,是 <span style=\"color:#ff99da;\">FGHRSH</span> 在 {date} 收藏的!"],
"jinrishici.com": ["这句诗词出自 <span style=\"color:#ff99da;\">《{title}》</span>,是 {dynasty}诗人 {author} 创作的!"],
"hitokoto.cn": ["这句一言来自 <span style=\"color:#ff99da;\">『{source}』</span>,是 <span style=\"color:#ff99da;\">{creator}</span> 在 hitokoto.cn 投稿的。"]
"lwl12.com": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>", ",是 <span style=\"color:#0099cc;\">{creator}</span> 投稿的", "。"],
"fghrsh.net": ["这句一言出处是 <span style=\"color:#0099cc;\">『{source}』</span>,是 <span style=\"color:#0099cc;\">FGHRSH</span> 在 {date} 收藏的!"],
"jinrishici.com": ["这句诗词出自 <span style=\"color:#0099cc;\">《{title}》</span>,是 {dynasty}诗人 {author} 创作的!"],
"hitokoto.cn": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>,是 <span style=\"color:#0099cc;\">{creator}</span> 在 hitokoto.cn 投稿的。"]
}
},
"mouseover": [

View File

@ -1,778 +0,0 @@
#! .\venv\
# encoding: utf-8
# @Time : 2023/4/18
# @Author : Spike
# @Descr :
import ast
import copy
import hashlib
import io
import json
import os.path
import subprocess
import threading
import time
from concurrent.futures import ThreadPoolExecutor
import Levenshtein
import psutil
import re
import tempfile
import shutil
from contextlib import ExitStack
import logging
import yaml
import requests
import tiktoken
logger = logging
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from scipy.linalg import norm
import pyperclip
import random
import gradio as gr
import toolbox
from prompt_generator import SqliteHandle
from bs4 import BeautifulSoup
"""contextlib is a module in the Python standard library that provides utility functions and decorators for writing context managers and for common context-handling tasks such as resource management and exception handling.
Docs: https://docs.python.org/3/library/contextlib.html"""
class Shell(object):
def __init__(self, args, stream=False):
self.args = args
self.subp = subprocess.Popen(args, shell=True,
stdin=subprocess.PIPE, stderr=subprocess.PIPE,
stdout=subprocess.PIPE, encoding='utf-8',
errors='ignore', close_fds=True)
self.__stream = stream
self.__temp = ''
def read(self):
logger.debug(f'The command being executed is: "{self.args}"')
if self.__stream:
sysout = self.subp.stdout
try:
with sysout as std:
for i in std:
logger.info(i.rstrip())
self.__temp += i
except KeyboardInterrupt:
pass  # fall through and return whatever has been collected so far
return 3, self.__temp + self.subp.stderr.read()
else:
sysout = self.subp.stdout.read()
syserr = self.subp.stderr.read()
if sysout:
logger.debug(f"{self.args} \n{sysout}")
return 1, sysout
elif syserr:
logger.error(f"{self.args} \n{syserr}")
return 0, syserr
else:
logger.debug(f"{self.args} \n{[sysout], [syserr]}")
return 2, '\n{}\n{}'.format(sysout, syserr)
def sync(self):
logger.debug('The command being executed is: "{}"'.format(self.args))
for i in self.subp.stdout:
logger.debug(i.rstrip())
self.__temp += i
yield self.__temp
for i in self.subp.stderr:
logger.debug(i.rstrip())
self.__temp += i
yield self.__temp
def timeStatistics(func):
"""
Decorator that measures and prints how long a function takes to run.
"""
def statistics(*args, **kwargs):
start_time = time.time()
obj = func(*args, **kwargs)
end_time = time.time()
elapsed = end_time - start_time  # was start - end, which yielded a negative duration
print('func:{} > Time-consuming: {}'.format(func, elapsed))
return obj
return statistics
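# A minimal usage sketch of the decorator above (illustrative; not part of the original file):
# @timeStatistics
# def slow_task():
#     time.sleep(0.5)
# slow_task()  # prints something like: func:<function slow_task at 0x...> > Time-consuming: 0.50...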
def copy_temp_file(file):
if os.path.exists(file):
exdir = tempfile.mkdtemp()
temp_ = shutil.copy(file, os.path.join(exdir, os.path.basename(file)))
return temp_
else:
return None
def md5_str(st):
# create an MD5 object
md5 = hashlib.md5()
# feed the content into the MD5 object
md5.update(str(st).encode())
# get the hex digest
result = md5.hexdigest()
return result
def html_tag_color(tag, color=None, font='black'):
"""
Convert text into HTML with a highlight background.
"""
if not color:
rgb = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
color = f"rgb{rgb}"
tag = f'<span style="background-color: {color}; font-weight: bold; color: {font}">&nbsp;{tag}&ensp;</span>'
return tag
def html_a_blank(__href, name=''):
if not name:
name = __href
a = f'<a href="{__href}" target="_blank" class="svelte-xrr240">{name}</a>'
return a
def html_view_blank(__href, file_name=''):
if os.path.exists(__href):
__href = f'/file={__href}'
if not file_name:
file_name = __href.split('/')[-1]
a = f'<a href="{__href}" target="_blank" class="svelte-xrr240">{file_name}</a>'
return a
def html_iframe_code(html_file):
proxy, = toolbox.get_conf('LOCAL_PORT')
html_file = f'http://{ipaddr()}:{proxy}/file={html_file}'
ifr = f'<iframe width="100%" height="500px" frameborder="0" src="{html_file}"></iframe>'
return ifr
def html_download_blank(__href, file_name='temp', dir_name=''):
if os.path.exists(__href):
__href = f'/file={__href}'
if not dir_name:
dir_name = file_name
a = f'<a href="{__href}" target="_blank" download="{dir_name}" class="svelte-xrr240">{file_name}</a>'
return a
def html_local_img(__file):
a = f'<div align="center"><img src="file={__file}"></div>'
return a
def ipaddr():
# get the local IP address
ip = psutil.net_if_addrs()
for i in ip:
if ip[i][0][3]:
return ip[i][0][1]
def encryption_str(txt: str):
"""Mask sensitive values: match (keyword)(separator)(value) and replace the value with a placeholder."""
txt = str(txt)
pattern = re.compile(rf"(Authorization|WPS-Sid|Cookie)(:|\s+)\s*(\S+)[\s\S]*?(?=\n|$|\s)", re.IGNORECASE)
result = pattern.sub(lambda x: x.group(1) + ": XXXXXXXX", txt)
return result
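# Illustrative behavior (not part of the original file): the regex masks the value that
# follows a sensitive keyword such as Authorization, WPS-Sid or Cookie:
# encryption_str('Authorization: sk-abc123\nhello')
# -> 'Authorization: XXXXXXXX\nhello'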
def tree_out(dir=os.path.dirname(__file__), line=2, more=''):
"""
Render the local file tree as Markdown code text and write it to .tree.md.
"""
out = Shell(f'tree {dir} -F -I "__*|.*|venv|*.png|*.xlsx" -L {line} {more}').read()[1]
localfile = os.path.join(os.path.dirname(__file__), '.tree.md')
with open(localfile, 'w') as f:
f.write('```\n')
ll = out.splitlines()
for i in range(len(ll)):
if i == 0:
f.write(ll[i].split('/')[-2] + '\n')
else:
f.write(ll[i] + '\n')
f.write('```\n')
def chat_history(log: list, split=0):
"""
Code used by auto_gpt; to be migrated later.
"""
if split:
log = log[split:]
chat = ''
history = ''
for i in log:
chat += f'{i[0]}\n\n'
history += f'{i[1]}\n\n'
return chat, history
def df_similarity(s1, s2):
"""Deprecated: emits warnings, and this library is awkward to use this way."""
def add_space(s):
return ' '.join(list(s))
# insert spaces between the characters
s1, s2 = add_space(s1), add_space(s2)
# build term-frequency (TF) vectors
cv = CountVectorizer(tokenizer=lambda s: s.split())
corpus = [s1, s2]
vectors = cv.fit_transform(corpus).toarray()
# cosine similarity of the TF vectors
return np.dot(vectors[0], vectors[1]) / (norm(vectors[0]) * norm(vectors[1]))
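# Worked example (illustrative; not part of the original file): for s1='你好' and s2='你坏',
# the space-joined forms are '你 好' and '你 坏'; over the vocabulary {你, 好, 坏} the TF
# vectors are [1, 1, 0] and [1, 0, 1], so the cosine similarity is 1/(sqrt(2)*sqrt(2)) = 0.5.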
def check_json_format(file):
"""
Check whether an uploaded JSON file matches the expected format.
"""
new_dict = {}
data = JsonHandle(file).load()
if type(data) is list and len(data) > 0:
if type(data[0]) is dict:
for i in data:
new_dict.update({i['act']: i['prompt']})
return new_dict
def json_convert_dict(file):
"""
Batch-convert JSON files into one dict.
"""
new_dict = {}
for root, dirs, files in os.walk(file):
for f in files:
if f.startswith('prompt') and f.endswith('json'):
new_dict.update(check_json_format(f))
return new_dict
def draw_results(txt, prompt: gr.Dataset, percent, switch, ipaddr: gr.Request):
"""
Render the search results.
Args:
txt (str): filter text
prompt: the original dataset object
percent (int): TF coefficient used for text similarity
switch (list): filter personal or everyone's prompts
ipaddr: requester info
Returns:
The tuple the registered callback expects.
"""
data = diff_list(txt, percent=percent, switch=switch, hosts=ipaddr.client.host)
prompt.samples = data
return prompt.update(samples=data, visible=True), prompt
def diff_list(txt='', percent=0.70, switch: list = None, lst: dict = None, sp=15, hosts=''):
"""
Group search results by similarity: two texts whose similarity is >70% are
counted together, and the longer one is kept as the key.
Args:
txt (str): filter text
percent (int): TF coefficient used for text similarity
switch (list): filter personal or everyone's prompts
lst: an optional list or dict to use instead of the database
sp: length of the excerpt to display
hosts: requester's IP
Returns:
A list.
"""
count_dict = {}
is_all = toolbox.get_conf('prompt_list')[0]['key'][1]
if not lst:
lst = {}
tabs = SqliteHandle().get_tables()
if is_all in switch:
lst.update(SqliteHandle(f"ai_common_{hosts}").get_prompt_value(txt))
else:
for tab in tabs:
if tab.startswith('ai_common'):
lst.update(SqliteHandle(f"{tab}").get_prompt_value(txt))
lst.update(SqliteHandle(f"ai_private_{hosts}").get_prompt_value(txt))
# group the data according to the percent coefficient
str_ = time.time()
def tf_factor_calcul(i):
found = False
dict_copy = count_dict.copy()
for key in dict_copy.keys():
str_tf = Levenshtein.jaro_winkler(i, key)
if str_tf >= percent:
if len(i) > len(key):
count_dict[i] = count_dict[key] + 1
count_dict.pop(key)
else:
count_dict[key] += 1
found = True
break
if not found: count_dict[i] = 1
with ThreadPoolExecutor(100) as executor:
executor.map(tf_factor_calcul, lst)
print('similarity grouping took', time.time()-str_)
sorted_dict = sorted(count_dict.items(), key=lambda x: x[1], reverse=True)
if switch:
sorted_dict += prompt_retrieval(is_all=switch, hosts=hosts, search=True)
dateset_list = []
for key in sorted_dict:
# start matching the keyword
index = str(key[0]).lower().find(txt.lower())
index_ = str(key[1]).lower().find(txt.lower())
if index != -1 or index_ != -1:
if index == -1: index = index_  # also search by prompt name
# sp = split: decides where the excerpt starts and where it is cut off
if index - sp > 0:
start = index - sp
else:
start = 0
if len(key[0]) > sp * 2:
end = key[0][-sp:]
else:
end = ''
# if a search string was given, filter by it; otherwise return everything
if txt == '' and len(key[0]) >= sp:
show = key[0][0:sp] + " . . . " + end
show = show.replace('<', '')
elif txt == '' and len(key[0]) < sp:
show = key[0][0:sp]
show = show.replace('<', '')
else:
show = str(key[0][start:index + sp]).replace('<', '').replace(txt, html_tag_color(txt))
show += f" {html_tag_color(' X ' + str(key[1]))}"
value = lst.get(key[0]) or None  # keep None for missing or falsy entries
dateset_list.append([show, key[0], value, key[1]])
return dateset_list
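# Illustrative sketch of the grouping rule in tf_factor_calcul above (values are
# approximate; not part of the original file):
# import Levenshtein
# Levenshtein.jaro_winkler('translate to english', 'translate to english!')  # ~0.99 -> merged under the longer key
# Levenshtein.jaro_winkler('translate to english', 'write a poem')           # low similarity -> kept as separate keys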
def prompt_upload_refresh(file, prompt, ipaddr: gr.Request):
"""
上传文件将文件转换为字典然后存储到数据库并刷新Prompt区域
Args:
file 上传的文件
prompt 原始prompt对象
ipaddripaddr用户请求信息
Returns:
注册函数所需的元祖对象
"""
hosts = ipaddr.client.host
if file.name.endswith('json'):
upload_data = check_json_format(file.name)
elif file.name.endswith('yaml'):
upload_data = YamlHandle(file.name).load()
else:
upload_data = {}
if upload_data != {}:
SqliteHandle(f'prompt_{hosts}').inset_prompt(upload_data)
ret_data = prompt_retrieval(is_all=['个人'], hosts=hosts)
return prompt.update(samples=ret_data, visible=True), prompt, ['个人']
else:
prompt.samples = [[f'{html_tag_color("数据解析失败,请检查文件是否符合规范", color="red")}', '']]
return prompt.samples, prompt, []
def prompt_retrieval(is_all, hosts='', search=False):
"""
上传文件将文件转换为字典然后存储到数据库并刷新Prompt区域
Args:
is_all prompt类型
hosts 查询的用户ip
search支持搜索搜索时将key作为key
Returns:
返回一个列表
"""
count_dict = {}
if '所有人' in is_all:
for tab in SqliteHandle('ai_common').get_tables():
if tab.startswith('prompt'):
data = SqliteHandle(tab).get_prompt_value(None)
if data: count_dict.update(data)
elif '个人' in is_all:
data = SqliteHandle(f'prompt_{hosts}').get_prompt_value(None)
if data: count_dict.update(data)
retrieval = []
for key in count_dict:
if not search:
retrieval.append([key, count_dict[key]])
else:
retrieval.append([count_dict[key], key])
return retrieval
def prompt_reduce(is_all, prompt: gr.Dataset, ipaddr: gr.Request): # is_all, ipaddr: gr.Request
"""
上传文件将文件转换为字典然后存储到数据库并刷新Prompt区域
Args:
is_all prompt类型
prompt dataset原始对象
ipaddr请求用户信息
Returns:
返回注册函数所需的对象
"""
data = prompt_retrieval(is_all=is_all, hosts=ipaddr.client.host)
prompt.samples = data
return prompt.update(samples=data, visible=True), prompt, is_all
def prompt_save(txt, name, prompt: gr.Dataset, ipaddr: gr.Request):
"""
Edit and save a prompt.
Args:
txt: prompt body
name: prompt name
prompt: the original dataset object
ipaddr: requester info
Returns:
The objects the registered callback expects.
"""
if txt and name:
yaml_obj = SqliteHandle(f'prompt_{ipaddr.client.host}')
yaml_obj.inset_prompt({name: txt})
result = prompt_retrieval(is_all=['个人'], hosts=ipaddr.client.host)
prompt.samples = result
return "", "", ['个人'], prompt.update(samples=result, visible=True), prompt, gr.Tabs.update(selected='chatbot')
else:
result = [[f'{html_tag_color("编辑框 or 名称不能为空!!!!!", color="red")}', '']]
prompt.samples = result
return txt, name, [], prompt.update(samples=result, visible=True), prompt, gr.Tabs.update(selected='chatbot')
def prompt_input(txt: str, prompt_str, name_str, index, data: gr.Dataset, tabs_index):
"""
Apply a prompt when a dataset entry is clicked.
Args:
txt: input box content
index: index of the clicked dataset entry
data: the original dataset object
Returns:
The objects the registered callback expects.
"""
data_str = str(data.samples[index][1])
data_name = str(data.samples[index][0])
rp_str = '{{{v}}}'
def str_v_handle(__str):
if data_str.find(rp_str) != -1 and __str:
txt_temp = data_str.replace(rp_str, __str)
elif __str:
txt_temp = data_str + '\n' + __str
else:
txt_temp = data_str
return txt_temp
if tabs_index == 1:
new_txt = str_v_handle(prompt_str)
return txt, new_txt, data_name
else:
new_txt = str_v_handle(txt)
return new_txt, prompt_str, name_str
def copy_result(history):
"""Copy the latest history entry to the clipboard."""
if history != []:
pyperclip.copy(history[-1])
return '已将结果复制到剪切板'
else:
return "无对话记录,复制错误!!"
def str_is_list(s):
try:
list_ast = ast.literal_eval(s)
return isinstance(list_ast, list)
except (SyntaxError, ValueError):
return False
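# Illustrative behavior (not part of the original file): ast.literal_eval parses the string
# safely, so only genuine list literals pass the check.
# str_is_list("[1, 2, 'a']")  # True
# str_is_list("{'a': 1}")     # False: a dict, not a list
# str_is_list("hello")        # False: raises ValueError internally, which is caught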
def show_prompt_result(index, data: gr.Dataset, chatbot, pro_edit, pro_name):
"""
Show the saved conversation for a prompt.
Args:
index: index of the clicked dataset entry
data: the original dataset object
chatbot: the chatbot component
Returns:
The objects the registered callback expects.
"""
click = data.samples[index]
if str_is_list(click[2]):
list_copy = ast.literal_eval(click[2])  # safer than eval(); str_is_list has already validated the literal
for i in range(0, len(list_copy), 2):
if i + 1 >= len(list_copy):  # index would overrun: handle the trailing element alone
chatbot.append([list_copy[i], None])
else:
chatbot.append([list_copy[i], list_copy[i + 1]])
elif click[2] is None and pro_edit == '':
pro_edit = click[1]
pro_name = click[3]
else:
chatbot.append((click[1], click[2]))
return chatbot, pro_edit, pro_name
def pattern_html(html):
bs = BeautifulSoup(str(html), 'html.parser')
md_message = bs.find('div', {'class': 'md-message'})
if md_message:
return md_message.get_text(separator='')
else:
return ""
def thread_write_chat(chatbot, history):
"""
Write the conversation record to the database.
"""
chatbot, history = copy.copy(chatbot), copy.copy(history)
private_key = toolbox.get_conf('private_key')[0]
chat_title = chatbot[0][1].split()
i_say = pattern_html(chatbot[-1][0])
if history:
gpt_result = history
else: # if there is no history, read from the chat box instead
gpt_result = [pattern_html(v) for i in chatbot for v in i]
if private_key in chat_title:
SqliteHandle(f'ai_private_{chat_title[-2]}').inset_prompt({i_say: gpt_result})
else:
SqliteHandle(f'ai_common_{chat_title[-2]}').inset_prompt({i_say: gpt_result})
base_path = os.path.dirname(__file__)
prompt_path = os.path.join(base_path, 'users_data')
users_path = os.path.join(base_path, 'private_upload')
logs_path = os.path.join(base_path, 'gpt_log')
def reuse_chat(result, chatbot, history, pro_numb, say):
"""Reuse a conversation record."""
if result is None or result == []:
return chatbot, history, gr.update(), gr.update(), '', gr.Column.update()
else:
if pro_numb:
chatbot += result
history += [pattern_html(_) for i in result for _ in i]
else:
chatbot.append(result[-1])
history += [pattern_html(_) for i in result[-2:] for _ in i]
print(chatbot[-1][0])
return chatbot, history, say, gr.Tabs.update(selected='chatbot'), '', gr.Column.update(visible=False)
def num_tokens_from_string(listing: list, encoding_name: str = 'cl100k_base') -> int:
"""Returns the number of tokens in a list of text strings."""
encoding = tiktoken.get_encoding(encoding_name)  # fetch the encoding once instead of per item
count_tokens = 0
for i in listing:
count_tokens += len(encoding.encode(i))
return count_tokens
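# Illustrative usage (not part of the original file) with the default cl100k_base
# encoding used by the gpt-3.5/gpt-4 family:
# num_tokens_from_string(['hello world'])  # -> 2 ('hello' + ' world')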
def spinner_chatbot_loading(chatbot):
loading = '.' * random.randint(1, 5)
# convert the tuple to a list so it can be modified
loading_msg = copy.deepcopy(chatbot)
temp_list = list(loading_msg[-1])
temp_list[1] = pattern_html(temp_list[1]) + loading
# convert the list back to a tuple and replace the original one
loading_msg[-1] = tuple(temp_list)
return loading_msg
def refresh_load_data(chat, history, prompt, crazy_list, request: gr.Request):
"""
Args:
chat: the chat component
history: conversation history
prompt: the prompt dataset component
Returns:
Intended to load the latest data on every page refresh.
"""
is_all = toolbox.get_conf('prompt_list')[0]['key'][0]
data = prompt_retrieval(is_all=[is_all])
prompt.samples = data
selected = random.sample(crazy_list, 4)
user_agent = request.kwargs['headers']['user-agent'].lower()
if user_agent.find('android') != -1 or user_agent.find('iphone') != -1:
hide_elem = gr.update(visible=False)
else:
hide_elem = gr.update()
outputs = [prompt.update(samples=data, visible=True), prompt,
chat, history, gr.Dataset.update(samples=[[i] for i in selected]), selected,
hide_elem, hide_elem]
return outputs
def txt_converter_json(input_string):
try:
if input_string.startswith("{") and input_string.endswith("}"):
# try to convert a dict-like string into a dict object
dict_object = ast.literal_eval(input_string)
else:
# try to parse the string as a JSON object
dict_object = json.loads(input_string)
formatted_json_string = json.dumps(dict_object, indent=4, ensure_ascii=False)
return formatted_json_string
except (ValueError, SyntaxError):
return input_string
def clean_br_string(s):
s = re.sub(r'<\s*br\s*/?>', '\n', s)  # match <br>, <br/>, <br />, < br> and < br/> in one pattern
return s
def update_btn(self,
value: str = None,
variant: str = None,
visible: bool = None,
interactive: bool = None,
elem_id: str = None,
label: str = None
):
if not variant: variant = self.variant
if not visible: visible = self.visible
if not value: value = self.value
if not interactive: interactive = self.interactive
if not elem_id: elem_id = self.elem_id
if not label: label = self.label
return {
"variant": variant,
"visible": visible,
"value": value,
"interactive": interactive,
'elem_id': elem_id,
'label': label,
"__type__": "update",
}
def update_txt(self,
value: str = None,
lines: int = None,
max_lines: int = None,
placeholder: str = None,
label: str = None,
show_label: bool = None,
visible: bool = None,
interactive: bool = None,
type: str = None,
elem_id: str = None
):
# fall back to the component's current attribute whenever an argument is omitted
# (the original ignored every argument except elem_id)
return {
"lines": lines if lines is not None else self.lines,
"max_lines": max_lines if max_lines is not None else self.max_lines,
"placeholder": placeholder if placeholder is not None else self.placeholder,
"label": label if label is not None else self.label,
"show_label": show_label if show_label is not None else self.show_label,
"visible": visible if visible is not None else self.visible,
"value": value if value is not None else self.value,
"type": type if type is not None else self.type,
"interactive": interactive if interactive is not None else self.interactive,
"elem_id": elem_id if elem_id is not None else self.elem_id,
"__type__": "update",
}
def get_html(filename):
path = os.path.join(base_path, "docs/assets", "html", filename)
if os.path.exists(path):
with open(path, encoding="utf8") as file:
return file.read()
return ""
def git_log_list():
ll = Shell("git log --pretty=format:'%s | %h' -n 10").read()[1].splitlines()
return [i.split('|') for i in ll if 'branch' not in i][:5]
import qrcode
from PIL import Image, ImageDraw
def qr_code_generation(data, icon_path=None, file_name='qc_icon.png'):
# create the qrcode object
qr = qrcode.QRCode(version=2, error_correction=qrcode.constants.ERROR_CORRECT_Q, box_size=10, border=2,)
qr.add_data(data)
# render the QR code image
img = qr.make_image()
# convert the image to RGBA
img = img.convert('RGBA')
# get the size of the QR code image
img_w, img_h = img.size
# open the logo
if not icon_path:
icon_path = os.path.join(base_path, 'docs/assets/PLAI.jpeg')
logo = Image.open(icon_path)
# the logo takes up a quarter of the QR code
logo_w = img_w // 4
logo_h = img_w // 4
# resize the logo
logo = logo.resize((logo_w, logo_h), Image.LANCZOS) # or Image.Resampling.LANCZOS
# paste the logo at the center of the QR code
w = (img_w - logo_w) // 2
h = (img_h - logo_h) // 2
img.paste(logo, (w, h))
qr_path = os.path.join(logs_path, file_name)  # was the string literal 'file_name'
img.save(qr_path)  # was img.save() with no path, which fails
return qr_path
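# Usage sketch (illustrative; not part of the original file):
# path = qr_code_generation('https://example.com', file_name='site_qr.png')
# 'path' points at the logo-embedded QR image saved under gpt_log/.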
class YamlHandle:
def __init__(self, file=os.path.join(prompt_path, 'ai_common.yaml')):
if not os.path.exists(file):
Shell(f'touch {file}').read()
self.file = file
self._load = self.load()
def load(self) -> dict:
with open(file=self.file, mode='r') as f:
data = yaml.safe_load(f)
return data
def update(self, key, value):
data = self._load
if not data:
data = {}
data[key] = value
with open(file=self.file, mode='w') as f:
yaml.dump(data, f, allow_unicode=True)
return data
def dump_dict(self, new_dict):
data = self._load
if not data:
data = {}
data.update(new_dict)
with open(file=self.file, mode='w') as f:
yaml.dump(data, f, allow_unicode=True)
return data
class JsonHandle:
def __init__(self, file):
self.file = file
def load(self) -> object:
with open(self.file, 'r') as f:
data = json.load(f)
return data
if __name__ == '__main__':
pass

View File

@ -130,9 +130,9 @@ def main():
ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
if "底部输入区" in a: ret.update({txt: gr.update(value="")})
return ret
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, clearBtn, clearBtn2, plugin_advanced_arg] )
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] )
# 整理反复出现的控件句柄组合
input_combo = [cookies, max_length_sl, md_dropdown, txt, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
output_combo = [cookies, chatbot, history, status]
predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
# 提交按钮、重置按钮
@ -155,7 +155,7 @@ def main():
click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
cancel_handles.append(click_handle)
# 文件上传区接收文件后与chatbot的互动
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt ], [chatbot, txt])
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2])
# 函数插件-固定按钮区
for k in crazy_fns:
if not crazy_fns[k].get("AsButton", True): continue
@ -174,7 +174,7 @@ def main():
dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] )
def on_md_dropdown_changed(k):
return {chatbot: gr.update(label="当前模型:"+k)}
md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot])
md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] )
# 随变按钮的回调函数注册
def route(k, *args, **kwargs):
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return

View File

@ -1,102 +0,0 @@
#! .\venv\
# encoding: utf-8
# @Time : 2023/4/19
# @Author : Spike
# @Descr :
import os.path
import sqlite3
import threading
import functools
import func_box
# connect to the database
base_path = os.path.dirname(__file__)
prompt_path = os.path.join(base_path, 'users_data')
def connect_db_close(cls_method):
@functools.wraps(cls_method)
def wrapper(cls=None, *args, **kwargs):
cls._connect_db()
result = cls_method(cls, *args, **kwargs)
cls._close_db()
return result
return wrapper
class SqliteHandle:
def __init__(self, table='ai_common', database='ai_prompt.db'):
self.__database = database
self.__connect = sqlite3.connect(os.path.join(prompt_path, self.__database))
self.__cursor = self.__connect.cursor()
self.__table = table
if self.__table not in self.get_tables():
self.create_tab()
def new_connect_db(self):
"""Create a fresh, independent connection per thread for multithreaded use."""
self.__connect = sqlite3.connect(os.path.join(prompt_path, self.__database))
self.__cursor = self.__connect.cursor()
def new_close_db(self):
self.__cursor.close()
self.__connect.close()
def create_tab(self):
self.__cursor.execute(f"CREATE TABLE `{self.__table}` ('prompt' TEXT UNIQUE, 'result' TEXT)")
def get_tables(self):
all_tab = []
result = self.__cursor.execute("SELECT name FROM sqlite_master WHERE type = 'table';")
for tab in result:
all_tab.append(tab[0])
return all_tab
def get_prompt_value(self, find=None):
temp_all = {}
if find:
result = self.__cursor.execute(f"SELECT prompt, result FROM `{self.__table}` WHERE prompt LIKE '%{find}%'").fetchall()
else:
result = self.__cursor.execute(f"SELECT prompt, result FROM `{self.__table}`").fetchall()
for row in result:
temp_all[row[0]] = row[1]
return temp_all
def inset_prompt(self, prompt: dict):
for key in prompt:
self.__cursor.execute(f"REPLACE INTO `{self.__table}` (prompt, result) VALUES (?, ?);", (str(key), str(prompt[key])))
self.__connect.commit()
def delete_prompt(self, name):
self.__cursor.execute(f"DELETE from `{self.__table}` where prompt LIKE '{name}'")
self.__connect.commit()
def delete_tables(self, tab):
self.__cursor.execute(f"DROP TABLE `{tab}`;")
self.__connect.commit()
def find_prompt_result(self, name):
query = self.__cursor.execute(f"SELECT result FROM `{self.__table}` WHERE prompt LIKE '{name}'").fetchall()
if query == []:
query = self.__cursor.execute(f"SELECT result FROM `prompt_127.0.0.1` WHERE prompt LIKE '{name}'").fetchall()
return query[0][0]
else:
return query[0][0]
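# Note (not part of the original file): the LIKE queries above interpolate user input
# with f-strings, which is open to SQL injection. A parameterized sketch (an assumed
# drop-in for get_prompt_value; the table name still has to be interpolated, since
# SQLite cannot parameterize identifiers):
# def get_prompt_value(self, find=None):
#     if find:
#         rows = self.__cursor.execute(
#             f"SELECT prompt, result FROM `{self.__table}` WHERE prompt LIKE ?",
#             (f"%{find}%",)).fetchall()
#     else:
#         rows = self.__cursor.execute(f"SELECT prompt, result FROM `{self.__table}`").fetchall()
#     return {row[0]: row[1] for row in rows}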
def cp_db_data(include_tab='prompt'):
sql_ll = sqlite_handle(database='ai_prompt_cp.db')
tabs = sql_ll.get_tables()
for i in tabs:
if str(i).startswith(include_tab):
old_data = sqlite_handle(table=i, database='ai_prompt_cp.db').get_prompt_value()
sqlite_handle(table=i).inset_prompt(old_data)
def inset_127_prompt():
sql_handle = sqlite_handle(table='prompt_127.0.0.1')
prompt_json = os.path.join(prompt_path, 'prompts-PlexPt.json')
data_list = func_box.JsonHandle(prompt_json).load()
for i in data_list:
sql_handle.inset_prompt(prompt={i['act']: i['prompt']})
sqlite_handle = SqliteHandle
if __name__ == '__main__':
cp_db_data()

View File

@ -13,11 +13,8 @@ from functools import lru_cache
from concurrent.futures import ThreadPoolExecutor
from toolbox import get_conf, trimmed_format_exc
from request_llm.bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
from request_llm.bridge_chatgpt import predict as chatgpt_ui
from .bridge_azure_test import predict_no_ui_long_connection as azure_noui
from .bridge_azure_test import predict as azure_ui
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
from .bridge_chatgpt import predict as chatgpt_ui
from .bridge_azure_test import predict_no_ui_long_connection as azure_noui
from .bridge_azure_test import predict as azure_ui
@ -54,11 +51,10 @@ class LazyloadTiktoken(object):
return encoder.decode(*args, **kwargs)
# endpoint redirection
API_URL_REDIRECT, PROXY_API_URL = get_conf("API_URL_REDIRECT", 'PROXY_API_URL')
API_URL_REDIRECT, = get_conf("API_URL_REDIRECT")
openai_endpoint = "https://api.openai.com/v1/chat/completions"
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
proxy_endpoint = PROXY_API_URL
# backward compatibility with the old config
try:
API_URL, = get_conf("API_URL")
@ -73,7 +69,6 @@ if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_e
if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]
# get the tokenizer
tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
tokenizer_gpt4 = LazyloadTiktoken("gpt-4")
@ -127,15 +122,6 @@ model_info = {
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
# azure openai
"azure-gpt35":{
"fn_with_ui": azure_ui,
"fn_without_ui": azure_noui,
"endpoint": get_conf("AZURE_ENDPOINT"),
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
# azure openai
"azure-gpt35":{
@ -161,9 +147,9 @@ model_info = {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": api2d_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
"max_token": 8192,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
# alias chatglm directly to chatglm2

View File

@ -12,14 +12,12 @@
"""
import json
import random
import time
import gradio as gr
import logging
import traceback
import requests
import importlib
import func_box
# put your secrets, such as API keys and proxy URLs, in config_private.py
# on load, a private config_private file (ignored by git) is checked first and, if present, overrides config
@ -30,6 +28,7 @@ proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
def get_full_error(chunk, stream_response):
"""
Get the complete error message returned by OpenAI.
@ -42,7 +41,9 @@ def get_full_error(chunk, stream_response):
return chunk
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
def predict_no_ui_long_connection(
inputs, llm_kwargs, history=None, sys_prompt="", observe_window=None, console_slience=False
):
"""
Send to chatGPT and wait for the complete reply in one call, without showing intermediate output; streaming is used internally so the connection is not cut off mid-way.
inputs
@ -56,45 +57,59 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
observe_window = None:
Used to pass the already-generated output across threads; most of the time it exists only for a fancy visual effect and can be left empty. observe_window[0]: observation window. observe_window[1]: watchdog.
"""
watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
if history is None:
history = []
watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
retry = 0
from bridge_all import model_info
while True:
try:
# make a POST request to the API endpoint, stream=False
from request_llm.bridge_all import model_info
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
response = requests.post(endpoint, headers=headers, proxies=proxies,
json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
except requests.exceptions.ReadTimeout as e:
json=payload, stream=True, timeout=TIMEOUT_SECONDS)
stream_response = response.iter_lines()
break
except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
retry += 1
traceback.print_exc()
if retry > MAX_RETRY: raise TimeoutError
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
if retry > MAX_RETRY:
raise TimeoutError
if MAX_RETRY != 0:
print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
except Exception as e:
print(f"出现异常:{e}")
raise e
stream_response = response.iter_lines()
result = ''
while True:
try: chunk = next(stream_response).decode()
try:
chunk = next(stream_response).decode()
except StopIteration:
break
except requests.exceptions.ConnectionError:
chunk = next(stream_response).decode() # failed; retry once, after that there is nothing more to do
if len(chunk)==0: continue
# except requests.exceptions.ConnectionError:
#     chunk = next(stream_response).decode() # failed; retry once, after that there is nothing more to do
if len(chunk) == 0:
continue
if not chunk.startswith('data:'):
error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
if "reduce the length" in error_msg:
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
else:
raise RuntimeError("OpenAI拒绝了请求" + error_msg)
if ('data: [DONE]' in chunk): break # api2d finished normally
if 'data: [DONE]' in chunk:
break  # api2d finished normally
json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
delta = json_data["delta"]
if len(delta) == 0: break
if "role" in delta: continue
if len(delta) == 0:
break
if "role" in delta:
continue
if "content" in delta:
result += delta["content"]
if not console_slience: print(delta["content"], end='')
if not console_slience:
print(delta["content"], end='')
if observe_window is not None:
# observation window: push the data received so far to the display
if len(observe_window) >= 1: observe_window[0] += delta["content"]
@ -102,13 +117,14 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("用户取消了程序。")
else: raise RuntimeError("意外Json结构"+delta)
else:
raise RuntimeError("意外Json结构"+delta)
if json_data['finish_reason'] == 'length':
raise ConnectionAbortedError("正常结束但显示Token不足导致输出不完整请削减单次输入的文本量。")
return result
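# Note (a sketch of the stream format, not from the source): the OpenAI-compatible
# endpoint emits Server-Sent Events, one JSON payload per "data:" line, e.g.
#   data: {"choices": [{"delta": {"content": "Hel"}, "finish_reason": null}]}
# terminated by "data: [DONE]"; hence the lstrip('data:') before json.loads and the
# break on '[DONE]' in the loop above.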
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
发送至chatGPT流式获取输出。
用于基础的对话功能。
@ -136,22 +152,24 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
raw_input = inputs
logging.info(f'[raw_input]_{llm_kwargs["ipaddr"]} {raw_input}')
logging.info(f'[raw_input] {raw_input}')
chatbot.append((inputs, ""))
loading_msg = func_box.spinner_chatbot_loading(chatbot)
yield from update_ui(chatbot=loading_msg, history=history, msg="等待响应") # refresh the UI
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # refresh the UI
try:
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
except RuntimeError as e:
chatbot[-1] = (inputs, f"您提供的api-key不满足要求不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # refresh the UI
return
history.append(inputs); history.append("")
retry = 0
while True:
try:
# make a POST request to the API endpoint, stream=True
from request_llm.bridge_all import model_info
from .bridge_all import model_info
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
response = requests.post(endpoint, headers=headers, proxies=proxies,
json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
@ -163,6 +181,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
if retry > MAX_RETRY: raise TimeoutError
gpt_replying_buffer = ""
is_head_of_the_stream = True
if stream:
stream_response = response.iter_lines()
@ -187,19 +206,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
# the former is API2D's
if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
# judged to be the end of the data stream; gpt_replying_buffer is complete
logging.info(f'[response]_{llm_kwargs["ipaddr"]} {gpt_replying_buffer}')
logging.info(f'[response] {gpt_replying_buffer}')
break
# process the body of the data stream
chunkjson = json.loads(chunk_decoded[6:])
status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
# an exception here usually means the text is too long; see get_full_error's output for details
gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"]
history[-1] = gpt_replying_buffer
chatbot[-1] = (history[-2], history[-1])
count_time = round(time.time() - llm_kwargs['start_time'], 3)
status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}\t" \
f"本次对话耗时: {func_box.html_tag_color(tag=f'{count_time}s')}"
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # refresh the UI
except Exception as e:
traceback.print_exc()
yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # refresh the UI
@ -228,9 +245,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # refresh the UI
return
count_tokens = func_box.num_tokens_from_string(listing=history)
status_text += f'\t 本次对话使用tokens: {func_box.html_tag_color(count_tokens)}'
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # refresh the UI
def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"""
@ -238,41 +253,32 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"""
if not is_any_api_key(llm_kwargs['api_key']):
raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案直接在输入区键入api_key然后回车提交。\n\n2. 长效解决方案在config.py中配置。")
if llm_kwargs['llm_model'].startswith('proxy-'):
api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
headers = {
"Content-Type": "application/json",
"api-key": f"{api_key}"
}
else:
api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"
}
api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"
}
conversation_cnt = len(history) // 2
messages = [{"role": "system", "content": system_prompt}]
if conversation_cnt:
for index in range(0, 2*conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = history[index]
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = history[index+1]
what_i_have_asked = {"role": "user", "content": history[index]}
what_gpt_answer = {"role": "assistant", "content": history[index + 1]}
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "": continue
if what_gpt_answer["content"] == timeout_bot_msg: continue
if what_gpt_answer["content"] == "":
continue
if what_gpt_answer["content"] == timeout_bot_msg:
continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = inputs
what_i_ask_now = {"role": "user", "content": inputs}
messages.append(what_i_ask_now)
payload = {
@ -286,20 +292,9 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"frequency_penalty": 0,
}
try:
print("\033[1;35m", f"{llm_kwargs['llm_model']}_{llm_kwargs['ipaddr']} :", "\033[0m", f"{conversation_cnt} : {inputs[:100]} ..........")
except:
print('输入中可能存在乱码。')
print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
except Exception as e:
print(f'输入中可能存在乱码。抛出异常: {e}')
return headers, payload
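# Illustrative example (not from the source) of the messages list generate_payload builds
# from a two-turn history ['hi', 'hello!'] and inputs='how are you':
# [
#     {"role": "system", "content": system_prompt},
#     {"role": "user", "content": "hi"},
#     {"role": "assistant", "content": "hello!"},
#     {"role": "user", "content": "how are you"},
# ]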
if __name__ == '__main__':
llm_kwargs = {
'api_key': 'sk-',
'llm_model': 'gpt-3.5-turbo',
'top_p': 1,
'max_length': 512,
'temperature': 1,
# 'ipaddr': ipaddr.client.host
}
chat = []
predict('你好', llm_kwargs=llm_kwargs, chatbot=chat, plugin_kwargs={})
print(chat)

View File

@ -1,4 +1,4 @@
./docs/gradio-3.32.2-py3-none-any.whl
gradio>=3.33.1
tiktoken>=0.3.3
requests[socks]
transformers
@ -15,11 +15,6 @@ pymupdf
openai
numpy
arxiv
pymupdf
pyperclip
scikit-learn
psutil
distro
python-dotenv
rich
Levenshtein
langchain
zh_langchain

theme.py
View File

@ -1,6 +1,6 @@
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, ADD_CHUANHU = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'ADD_CHUANHU')
CODE_HIGHLIGHT, ADD_WAIFU = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU')
# list of colors available in gradio
# gr.themes.utils.colors.slate (slate)
# gr.themes.utils.colors.gray (gray)
@ -29,185 +29,105 @@ CODE_HIGHLIGHT, ADD_WAIFU, ADD_CHUANHU = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU',
def adjust_theme():
try:
set_theme = gr.themes.Soft(
primary_hue=gr.themes.Color(
c50="#EBFAF2",
c100="#CFF3E1",
c200="#A8EAC8",
c300="#77DEA9",
c400="#3FD086",
c500="#02C160",
c600="#06AE56",
c700="#05974E",
c800="#057F45",
c900="#04673D",
c950="#2E5541",
name="small_and_beautiful",
),
secondary_hue=gr.themes.Color(
c50="#576b95",
c100="#576b95",
c200="#576b95",
c300="#576b95",
c400="#576b95",
c500="#576b95",
c600="#576b95",
c700="#576b95",
c800="#576b95",
c900="#576b95",
c950="#576b95",
),
neutral_hue=gr.themes.Color(
name="gray",
c50="#f6f7f8",
# c100="#f3f4f6",
c100="#F2F2F2",
c200="#e5e7eb",
c300="#d1d5db",
c400="#B2B2B2",
c500="#808080",
c600="#636363",
c700="#515151",
c800="#393939",
# c900="#272727",
c900="#2B2B2B",
c950="#171717",
),
radius_size=gr.themes.sizes.radius_sm,
).set(
button_primary_background_fill="*primary_500",
button_primary_background_fill_dark="*primary_600",
button_primary_background_fill_hover="*primary_400",
button_primary_border_color="*primary_500",
button_primary_border_color_dark="*primary_600",
button_primary_text_color="white",
button_primary_text_color_dark="white",
button_secondary_background_fill="*neutral_100",
button_secondary_background_fill_hover="*neutral_50",
button_secondary_background_fill_dark="*neutral_900",
button_secondary_text_color="*neutral_800",
button_secondary_text_color_dark="white",
background_fill_primary="#F7F7F7",
background_fill_primary_dark="#1F1F1F",
block_title_text_color="*primary_500",
block_title_background_fill_dark="*primary_900",
block_label_background_fill_dark="*primary_900",
input_background_fill="#F6F6F6",
chatbot_code_background_color="*neutral_950",
chatbot_code_background_color_dark="*neutral_950",
color_er = gr.themes.utils.colors.fuchsia
set_theme = gr.themes.Default(
primary_hue=gr.themes.utils.colors.orange,
neutral_hue=gr.themes.utils.colors.gray,
font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
"sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
set_theme.set(
# Colors
input_background_fill_dark="*neutral_800",
# Transition
button_transition="none",
# Shadows
button_shadow="*shadow_drop",
button_shadow_hover="*shadow_drop_lg",
button_shadow_active="*shadow_inset",
input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
checkbox_label_shadow="*shadow_drop",
block_shadow="*shadow_drop",
form_gap_width="1px",
# Button borders
input_border_width="1px",
input_background_fill="white",
# Gradients
stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
error_background_fill_dark="*background_fill_primary",
checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
button_primary_border_color_dark="*primary_500",
button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
button_cancel_border_color=color_er.c200,
button_cancel_border_color_dark=color_er.c600,
button_cancel_text_color=color_er.c600,
button_cancel_text_color_dark="white",
)
js = ''
if ADD_CHUANHU:
with open("./docs/assets/custom.js", "r", encoding="utf-8") as f, \
open("./docs/assets/external-scripts.js", "r", encoding="utf-8") as f1:
customJS = f.read()
externalScripts = f1.read()
js += f'<script>{customJS}</script><script async>{externalScripts}</script>'
# add a cute Live2D mascot
if ADD_WAIFU:
js += """
js = """
<script src="file=docs/waifu_plugin/jquery.min.js"></script>
<script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
<script src="file=docs/waifu_plugin/autoload.js"></script>
"""
gradio_original_template_fn = gr.routes.templates.TemplateResponse
def gradio_new_template_fn(*args, **kwargs):
res = gradio_original_template_fn(*args, **kwargs)
res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
res.init_headers()
return res
gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
gradio_original_template_fn = gr.routes.templates.TemplateResponse
def gradio_new_template_fn(*args, **kwargs):
res = gradio_original_template_fn(*args, **kwargs)
res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
res.init_headers()
return res
gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
except:
set_theme = None
print('gradio版本较旧, 不能自定义字体和颜色')
return set_theme
with open("docs/assets/custom.css", "r", encoding="utf-8") as f:
customCSS = f.read()
custom_css = customCSS
advanced_css = """
#debug_mes {
position: absolute;
bottom: 0;
left: 0;
width: 100%;
z-index: 1; /* use a higher z-index */
margin-bottom: 10px !important;
}
#chat_txt {
display: flex;
flex-direction: column-reverse;
overflow-y: auto !important;
z-index: 3;
flex-grow: 1; /* grow to fill the remaining space */
position: absolute;
bottom: 0;
left: 0;
width: 100%;
margin-bottom: 35px !important;
}
#sm_btn {
display: flex;
flex-wrap: unset !important;
gap: 5px !important;
width: var(--size-full);
}
textarea {
resize: none;
height: 100%; /* fill the parent's height */
}
#main_chatbot {
height: 75vh !important;
max-height: 75vh !important;
/* overflow: auto !important; */
z-index: 2;
}
#prompt_result{
height: 60vh !important;
max-height: 60vh !important;
}
.wrap.svelte-18telvq.svelte-18telvq {
padding: var(--block-padding) !important;
height: 100% !important;
max-height: 95% !important;
overflow-y: auto !important;
}
.app.svelte-1mya07g.svelte-1mya07g {
max-width: 100%;
position: relative;
/* margin: auto; */
padding: var(--size-4);
width: 100%;
height: 100%;
}
.md-message table {
.markdown-body table {
margin: 1em 0;
border-collapse: collapse;
empty-cells: show;
}
.md-message th, .md-message td {
.markdown-body th, .markdown-body td {
border: 1.2px solid var(--border-color-primary);
padding: 5px;
}
.md-message thead {
.markdown-body thead {
background-color: rgba(175,184,193,0.2);
}
.md-message thead th {
.markdown-body thead th {
padding: .5em .2em;
}
.md-message ol, .md-message ul {
.markdown-body ol, .markdown-body ul {
padding-inline-start: 2em !important;
}
/* chat box. */
[class *= "message"] {
gap: 7px !important;
border-radius: var(--radius-xl) !important;
/* padding: var(--spacing-xl) !important; */
/* font-size: var(--text-md) !important; */
@ -217,40 +137,27 @@ textarea {
}
[data-testid = "bot"] {
max-width: 95%;
letter-spacing: 0.5px;
font-weight: normal;
/* width: auto !important; */
border-bottom-left-radius: 0 !important;
}
.dark [data-testid = "bot"] {
max-width: 95%;
color: #ccd2db !important;
letter-spacing: 0.5px;
font-weight: normal;
/* width: auto !important; */
border-bottom-left-radius: 0 !important;
}
[data-testid = "user"] {
max-width: 100%;
letter-spacing: 0.5px;
/* width: auto !important; */
border-bottom-right-radius: 0 !important;
}
/* inline code block. */
.md-message code {
.markdown-body code {
display: inline;
white-space: break-spaces;
border-radius: 6px;
margin: 0 2px 0 2px;
padding: .2em .4em .1em .4em;
background-color: rgba(13, 17, 23, 0.95);
color: #eff0f2;
color: #c9d1d9;
}
.dark .md-message code {
.dark .markdown-body code {
display: inline;
white-space: break-spaces;
border-radius: 6px;
@ -260,7 +167,7 @@ textarea {
}
/* code block css */
.md-message pre code {
.markdown-body pre code {
display: block;
overflow: auto;
white-space: pre;
@ -270,7 +177,7 @@ textarea {
margin: 1em 2em 1em 0.5em;
}
.dark .md-message pre code {
.dark .markdown-body pre code {
display: block;
overflow: auto;
white-space: pre;

View File

@ -1,18 +1,11 @@
import html
import markdown
import importlib
import time
import inspect
import gradio as gr
import func_box
import re
import os
from latex2mathml.converter import convert as tex2mathml
from functools import wraps, lru_cache
import shutil
import os
import time
import glob
import sys
import threading
############################### plugin input/output interface #######################################
pj = os.path.join
"""
@ -28,6 +21,7 @@ pj = os.path.join
========================================================================
"""
class ChatBotWithCookies(list):
def __init__(self, cookie):
self._cookies = cookie
@ -47,68 +41,44 @@ def ArgsGeneralWrapper(f):
"""
Decorator that reorganizes the input arguments, changing their order and structure.
"""
def decorated(cookies, max_length, llm_model, txt, top_p, temperature,
chatbot, history, system_prompt, models, plugin_advanced_arg, ipaddr: gr.Request, *args):
""""""
def decorated(cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args):
txt_passon = txt
if txt == "" and txt2 != "": txt_passon = txt2
# introduce a chatbot that carries cookies
start_time = time.time()
encrypt, private = get_conf('switch_model')[0]['key']
private_key, = get_conf('private_key')
cookies.update({
'top_p':top_p,
'temperature':temperature,
})
llm_kwargs = {
'api_key': cookies['api_key'],
'llm_model': llm_model,
'top_p':top_p,
'max_length': max_length,
'temperature': temperature,
'ipaddr': ipaddr.client.host,
'start_time': start_time
'temperature':temperature,
}
plugin_kwargs = {
"advanced_arg": plugin_advanced_arg,
"parameters_def": ''
}
if len(args) > 1:
plugin_kwargs.update({'parameters_def': args[1]})
transparent_address_private = f'<p style="display:none;">\n{private_key}\n{ipaddr.client.host}\n</p>'
transparent_address = f'<p style="display:none;">\n{ipaddr.client.host}\n</p>'
if private in models:
if chatbot == []:
chatbot.append([None, f'隐私模式, 你的对话记录无法被他人检索 {transparent_address_private}'])
else:
chatbot[0] = [None, f'隐私模式, 你的对话记录无法被他人检索 {transparent_address_private}']
else:
if chatbot == []:
chatbot.append([None, f'正常对话模式, 你接来下的对话将会被记录并且可以被所有人检索你可以到Settings中选择隐私模式 {transparent_address}'])
else:
chatbot[0] = [None, f'正常对话模式, 你接来下的对话将会被记录并且可以被所有人检索你可以到Settings中选择隐私模式 {transparent_address}']
chatbot_with_cookie = ChatBotWithCookies(cookies)
chatbot_with_cookie.write_list(chatbot)
txt_passon = txt
if encrypt in models: txt_passon = func_box.encryption_str(txt)
yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
return decorated
def update_ui(chatbot, history, msg='正常', *args): # refresh the UI
def update_ui(chatbot, history, msg='正常', **kwargs):  # refresh the UI
"""
Refresh the user interface.
"""
assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时可用clear将其清空然后用for+append循环重新赋值。"
yield chatbot.get_cookies(), chatbot, history, msg
threading.Thread(target=func_box.thread_write_chat, args=(chatbot, history)).start()
# func_box.thread_write_chat(chatbot, history)
def update_ui_lastest_msg(lastmsg, chatbot, history, delay=1):  # refresh the UI
"""
Refresh the user interface.
"""
if len(chatbot) == 0: chatbot.append(["update_ui_last_msg", lastmsg])
if len(chatbot) == 0:
chatbot.append(["update_ui_last_msg", lastmsg])
chatbot[-1] = list(chatbot[-1])
chatbot[-1][-1] = lastmsg
yield from update_ui(chatbot=chatbot, history=history)
@ -116,24 +86,25 @@ def update_ui_lastest_msg(lastmsg, chatbot, history, delay=1): # 刷新界面
def trimmed_format_exc():
import os, traceback
str = traceback.format_exc()
import os
import traceback
_str = traceback.format_exc()
current_path = os.getcwd()
replace_path = "."
return str.replace(current_path, replace_path)
return _str.replace(current_path, replace_path)
def CatchException(f):
"""
装饰器函数捕捉函数f中的异常并封装到一个生成器中返回并显示到聊天当中。
"""
@wraps(f)
def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT=-1):
try:
yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
except Exception as e:
from check_proxy import check_proxy
from toolbox import get_conf
# from toolbox import get_conf  # no need to import from this very file
proxies, = get_conf('proxies')
tb_str = '```\n' + trimmed_format_exc() + '```'
if len(chatbot) == 0:
@ -141,7 +112,7 @@ def CatchException(f):
chatbot.append(["插件调度异常", "异常原因"])
chatbot[-1] = (chatbot[-1][0],
f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
yield from update_ui(chatbot=chatbot, history=history, msg=f'异常 {e}') # refresh the UI
yield from update_ui(chatbot=chatbot, history=history, msg=f'异常 {e}')  # refresh the UI
return decorated
@ -159,14 +130,9 @@ def HotReload(f):
def decorated(*args, **kwargs):
fn_name = f.__name__
f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
try:
yield from f_hot_reload(*args, **kwargs)
except TypeError:
args = tuple(args[element] for element in range(len(args)) if element != 6)
yield from f_hot_reload(*args, **kwargs)
yield from f_hot_reload(*args, **kwargs)
return decorated
####################################### other small utilities #####################################
"""
========================================================================
@ -186,6 +152,7 @@ def HotReload(f):
========================================================================
"""
def get_reduce_token_percent(text):
"""
* This function will be deprecated in the future
@ -230,7 +197,8 @@ def write_results_to_file(history, file_name=None):
# remove everything that cannot be handled by utf8
f.write(content.encode('utf-8', 'ignore').decode())
f.write('\n\n')
res = '以上材料已经被写入' + f'./gpt_log/{file_name}'
res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
print(res)
return res
@ -244,8 +212,6 @@ def regular_txt_to_markdown(text):
return text
def report_execption(chatbot, history, a, b):
"""
Append error information to the chatbot.
@ -255,51 +221,38 @@ def report_execption(chatbot, history, a, b):
history.append(b)
import re
def text_divide_paragraph(input_str):
if input_str:
code_blocks = re.findall(r'```[\s\S]*?```', input_str)
def text_divide_paragraph(text):
"""
Split the text at paragraph separators and generate HTML with paragraph tags.
"""
pre = '<div class="markdown-body">'
suf = '</div>'
if text.startswith(pre) and text.endswith(suf):
return text
for i, block in enumerate(code_blocks):
input_str = input_str.replace(block, f'{{{{CODE_BLOCK_{i}}}}}')
if code_blocks:
sections = re.split(r'({{{{\w+}}}})', input_str)
for idx, section in enumerate(sections):
if 'CODE_BLOCK' in section or section.startswith(' '):
continue
sections[idx] = re.sub(r'(?!```)(?<!\n)\n(?!(\n|^)( {0,3}[\*\+\-]|[0-9]+\.))', '\n\n', section)
input_str = ''.join(sections)
for i, block in enumerate(code_blocks):
input_str = input_str.replace(f'{{{{CODE_BLOCK_{i}}}}}', block.replace('\n', '\n'))
else:
lines = input_str.split('\n')
for idx, line in enumerate(lines[:-1]):
if not line.strip():
continue
if not (lines[idx + 1].startswith(' ') or lines[idx + 1].startswith('\t')):
lines[idx] += '\n' # turn one newline into two
input_str = '\n'.join(lines)
return input_str
if '```' in text:
# careful input
return pre + text + suf
else:
# wtf input
lines = text.split("\n")
for i, line in enumerate(lines):
lines[i] = lines[i].replace(" ", "&nbsp;")
text = "</br>".join(lines)
return pre + text + suf
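# Illustrative behavior of the markdown-body version restored above (not from the source):
# text_divide_paragraph('a b\nc')   -> '<div class="markdown-body">a&nbsp;b</br>c</div>'
# text_divide_paragraph('```x```')  -> '<div class="markdown-body">```x```</div>'  (fenced code left intact)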
@lru_cache(maxsize=128) # use an lru cache to speed up conversion
@lru_cache(maxsize=128)  # use an lru cache to speed up conversion
def markdown_convertion(txt):
"""
Convert Markdown text to HTML. If it contains math formulas, convert the formulas to HTML first.
"""
pre = '<div class="md-message">'
pre = '<div class="markdown-body">'
suf = '</div>'
raw_pre = '<div class="raw-message hideM">'
raw_suf = '</div>'
if txt.startswith(pre) and txt.endswith(suf):
# print('warning: the input has already been converted; converting twice may cause problems')
return txt # already converted; no need to convert again
if txt.startswith(raw_pre) and txt.endswith(raw_suf):
return txt # already converted; no need to convert again
raw_hide = raw_pre + txt + raw_suf
return txt # already converted; no need to convert again
markdown_extension_configs = {
'mdx_math': {
'enable_dollar_delimiter': True,
@ -308,6 +261,13 @@ def markdown_convertion(txt):
}
find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
def tex2mathml_catch_exception(content, *args, **kwargs):
try:
content = tex2mathml(content, *args, **kwargs)
except:
content = content
return content
def replace_math_no_render(match):
content = match.group(1)
if 'mode=display' in match.group(0):
@ -323,17 +283,16 @@ def markdown_convertion(txt):
content = content.replace('\\begin{aligned}', '\\begin{array}')
content = content.replace('\\end{aligned}', '\\end{array}')
content = content.replace('&', ' ')
content = tex2mathml(content, display="block")
content = tex2mathml_catch_exception(content, display="block")
return content
else:
return tex2mathml(content)
return tex2mathml_catch_exception(content)
def markdown_bug_hunt(content):
"""
Work around an mdx_math bug (a redundant <script> when a begin command is wrapped in a single $).
"""
content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">',
'<script type="math/tex; mode=display">')
content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
content = content.replace('</script>\n</script>', '</script>')
return content
@ -341,29 +300,23 @@ def markdown_convertion(txt):
if '```' not in txt:
return True
else:
if '```reference' in txt:
return True # newbing
else:
return False
if '```reference' in txt: return True # newbing
else: return False
if ('$$' in txt) and no_code(txt): # formulas marked with $, and no ``` code blocks
if ('$' in txt) and no_code(txt): # formulas marked with $, and no ``` code blocks
# convert everything to html format
split = markdown.markdown(text='---')
txt = re.sub(r'\$\$((?:.|\n)*?)\$\$', lambda match: '$$' + re.sub(r'\n+', '</br>', match.group(1)) + '$$', txt)
convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
convert_stage_1 = markdown_bug_hunt(convert_stage_1)
# re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
# 1. convert to easy-to-copy tex (do not render math)
convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
# 2. convert to rendered equation
convert_stage_1_resp = convert_stage_1.replace('</br>', '')
convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1_resp, flags=re.DOTALL)
convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
# cat them together
context = pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
return raw_hide + context # breaks the html structure and shows the source
return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
else:
context = pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
return raw_hide + context # 破坏html 结构,并显示源码
return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
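
End to end, the converter emits two copies of every formula, a copyable-TeX version and a rendered version, joined by the <hr /> produced from '---'. A usage sketch (output abbreviated; since the function is wrapped in @lru_cache, repeated calls with the same string are served from cache):

    html = markdown_convertion('Euler: $e^{i\\pi} + 1 = 0$')
    # -> '<div class="markdown-body"> ...copyable tex... <hr /> ...rendered MathML... </div>'
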
def close_up_code_segment_during_stream(gpt_reply):
@@ -377,9 +330,9 @@ def close_up_code_segment_during_stream(gpt_reply):
        str: a new string in which the missing closing ``` of the output code block is filled in.
    """
-   if '```' not in str(gpt_reply):
+   if '```' not in gpt_reply:
        return gpt_reply
-   if str(gpt_reply).endswith('```'):
+   if gpt_reply.endswith('```'):
        return gpt_reply
    # having excluded the two cases above, we
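
This helper matters during streaming, where a partial reply can end mid-code-block; the elided remainder presumably appends the missing fence. Expected behavior, sketched:

    partial = "Here you go:\n```python\nprint('hi')"
    print(close_up_code_segment_during_stream(partial))
    # -> "Here you go:\n```python\nprint('hi')\n```"  (the frontend can now render it)
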
@@ -405,8 +358,7 @@ def format_io(self, y):
    if gpt_reply is not None: gpt_reply = close_up_code_segment_during_stream(gpt_reply)
    # process
    y[-1] = (
-       # None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']),
-       None if i_ask is None else markdown_convertion(i_ask),
+       None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']),
        None if gpt_reply is None else markdown_convertion(gpt_reply)
    )
    return y
@@ -492,6 +444,7 @@ def find_recent_files(directory):
    return recent_files


def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
    # copy the file into the download area
    import shutil
@@ -505,50 +458,42 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
        chatbot._cookies.update({'file_to_promote': [new_path] + current})
-def get_user_upload(chatbot, ipaddr: gr.Request):
-    """
-    Get the files this user has uploaded before
-    """
-    private_upload = './private_upload'
-    user_history = os.path.join(private_upload, ipaddr.client.host)
-    history = """| No. | Directory | Files in directory |\n| --- | --- | --- |\n"""
-    count_num = 1
-    for root, d, file in os.walk(user_history):
-        file_link = "<br>".join([f'{func_box.html_view_blank(f"{root}/{i}")}' for i in file])
-        history += f'| {count_num} | {root} | {file_link} |\n'
-        count_num += 1
-    chatbot.append(['Load Submission History....',
-                    f'[Local Message] Copy one of the directories (or directory + file) below into the input box for the highlighted plugin buttons to use\n\n'
-                    f'{func_box.html_tag_color("Remember to check for stray leading/trailing spaces before submitting~")}\n\n'
-                    f'{history}'
-                    ])
-    return chatbot
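
The removed helper simply walks a per-IP upload directory and renders it as a markdown table. A self-contained sketch of the same idea, with a plain HTML link standing in for the fork's func_box.html_view_blank helper:

    import os

    def list_uploads(user_history='./private_upload/127.0.0.1'):
        # one table row per directory level, files rendered as links
        table = "| No. | Directory | Files |\n| --- | --- | --- |\n"
        for idx, (root, dirs, files) in enumerate(os.walk(user_history), start=1):
            links = "<br>".join(f'<a href="file={root}/{name}">{name}</a>' for name in files)
            table += f"| {idx} | {root} | {links} |\n"
        return table
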
-def on_file_uploaded(files, chatbot, txt, ipaddr: gr.Request):
+def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
    """
    Callback function invoked when files are uploaded
    """
    if len(files) == 0:
        return chatbot, txt
-   private_upload = './private_upload'
-   # shutil.rmtree('./private_upload/')  # no need to delete the files
-   time_tag_path = os.path.join(private_upload, ipaddr.client.host, time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()))
-   os.makedirs(f'{time_tag_path}', exist_ok=True)
    import shutil
    import os
    import time
    import glob
    from toolbox import extract_archive
+   try:
+       shutil.rmtree('./private_upload/')
+   except:
+       pass
+   time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+   os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
    err_msg = ''
    for file in files:
        file_origin_name = os.path.basename(file.orig_name)
-       shutil.copy(file.name, f'{time_tag_path}/{file_origin_name}')
-       err_msg += extract_archive(f'{time_tag_path}/{file_origin_name}',
-                                  dest_dir=f'{time_tag_path}/{file_origin_name}.extract')
-   moved_files = [fp for fp in glob.glob(f'{time_tag_path}/**/*', recursive=True)]
-   txt = f'{time_tag_path}'
+       shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
+       err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
+                                  dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract')
+   moved_files = [fp for fp in glob.glob('private_upload/**/*', recursive=True)]
+   if "底部输入区" in checkboxes:
+       txt = ""
+       txt2 = f'private_upload/{time_tag}'
+   else:
+       txt = f'private_upload/{time_tag}'
+       txt2 = ""
    moved_files_str = '\t\n\n'.join(moved_files)
-   chatbot.append([None,
+   chatbot.append(['I uploaded some files, please check them',
                    f'[Local Message] Received the following files: \n\n{moved_files_str}' +
                    f'\n\nThe call-path parameter has been automatically corrected to: \n\n{txt}' +
-                   f'\n\nNow, when you click any function plugin marked as "highlighted", the files above will be used as input parameters' + err_msg])
-   return chatbot, txt
+                   f'\n\nNow, when you click any function plugin marked in "red", the files above will be used as input parameters' + err_msg])
+   return chatbot, txt, txt2
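
Note that the new version wipes private_upload/ on every upload and re-creates a single timestamped directory; the per-call setup boils down to the following sketch (the source file path is hypothetical, and no gradio objects are involved):

    import os, shutil, time

    time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    upload_dir = os.path.join('private_upload', time_tag)
    os.makedirs(upload_dir, exist_ok=True)
    shutil.copy('/tmp/report.pdf', os.path.join(upload_dir, 'report.pdf'))  # hypothetical upload
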
def on_report_generated(cookies, files, chatbot):
@@ -566,23 +511,19 @@ def on_report_generated(cookies, files, chatbot):
    chatbot.append(['How can I fetch the report remotely?', f'The report has been added to the "file upload area" on the right (it may be collapsed); please check. {file_links}'])
    return cookies, report_files, chatbot
def is_openai_api_key(key):
    API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$", key)
    API_MATCH_AZURE = re.match(r"[a-zA-Z0-9]{32}$", key)
    return bool(API_MATCH_ORIGINAL) or bool(API_MATCH_AZURE)


def is_api2d_key(key):
    if key.startswith('fk') and len(key) == 41:
        return True
    else:
        return False


-def is_proxy_key(key):
-    if key.startswith('proxy-') and len(key) == 38:
-        return True
-    else:
-        return False
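
These checks are purely syntactic, matching only prefix and length. For instance (the keys below are made up just to fit the patterns):

    print(is_openai_api_key('sk-' + 'a' * 48))  # True: classic OpenAI format
    print(is_openai_api_key('b' * 32))          # True: Azure-style 32-char key
    print(is_api2d_key('fk' + 'c' * 39))        # True: 41 chars with 'fk' prefix
    print(is_api2d_key('sk-not-a-key'))         # False
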
def is_any_api_key(key):
    if ',' in key:
@@ -591,7 +532,8 @@ def is_any_api_key(key):
            if is_any_api_key(k): return True
        return False
    else:
-       return is_openai_api_key(key) or is_api2d_key(key) or is_proxy_key(key)
+       return is_openai_api_key(key) or is_api2d_key(key)
def what_keys(keys):
    avail_key_list = {'OpenAI Key': 0, "API2D Key": 0}
@@ -605,14 +547,8 @@ def what_keys(keys):
        if is_api2d_key(k):
            avail_key_list['API2D Key'] += 1
-   for k in key_list:
-       if is_proxy_key(k):
-           avail_key_list['Proxy Key'] += 1
+   return f"Detected: OpenAI Key {avail_key_list['OpenAI Key']}, API2D Key {avail_key_list['API2D Key']}"
-   return f"Detected: \n" \
-          f"OpenAI Key {avail_key_list['OpenAI Key']}\n" \
-          f"API2D Key {avail_key_list['API2D Key']}\n" \
-          f"Proxy Key {avail_key_list['Proxy Key']}\n"
def select_api_key(keys, llm_model):
    import random
@@ -627,16 +563,13 @@ def select_api_key(keys, llm_model):
        for k in key_list:
            if is_api2d_key(k): avail_key_list.append(k)
-   if llm_model.startswith('proxy-'):
-       for k in key_list:
-           if is_proxy_key(k): avail_key_list.append(k.replace('proxy-', ''))
    if len(avail_key_list) == 0:
        raise RuntimeError(f"The api-key you provided does not meet the requirements: it does not contain any key usable for {llm_model}. You may have selected the wrong model or request source.")
    api_key = random.choice(avail_key_list)  # random load balancing
    return api_key
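
Usage sketch (made-up keys; per the elided branches above, only keys of the matching format qualify for a given model, and the choice among qualifying keys is uniformly random):

    keys = 'sk-' + 'a' * 48 + ',' + 'fk' + 'b' * 39
    print(select_api_key(keys, 'gpt-3.5-turbo'))  # picks the OpenAI-format key here
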
def read_env_variable(arg, default_value):
    """
    The environment variable can be `GPT_ACADEMIC_CONFIG` (takes precedence) or just `CONFIG`
@@ -691,6 +624,7 @@ def read_env_variable(arg, default_value):
    print亮绿(f"[ENV_VAR] Successfully read environment variable {arg}")
    return r
@lru_cache(maxsize=128)
def read_single_conf_with_lru_cache(arg):
    from colorful import print亮红, print亮绿, print亮蓝
@@ -701,12 +635,6 @@ def read_single_conf_with_lru_cache(arg):
    except:
        try:
            # Priority 2: read the setting from config_private
-           # path of the directory containing the current file
-           current_dir = os.path.dirname(os.path.abspath(__file__))
-           # path of the parent directory
-           parent_dir = os.path.dirname(current_dir)
-           # add the parent directory to Python's module search path
-           sys.path.append(parent_dir)
            r = getattr(importlib.import_module('config_private'), arg)
        except:
            # Priority 3: read the setting from config
@@ -856,6 +784,7 @@ def clip_history(inputs, history, tokenizer, max_token_limit):
========================================================================
"""


def zip_folder(source_folder, dest_folder, zip_name):
    import zipfile
    import os
@@ -887,6 +816,7 @@ def zip_folder(source_folder, dest_folder, zip_name):
    print(f"Zip file created at {zip_file}")
def zip_result(folder):
    import time
    t = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
@@ -897,6 +827,7 @@ def gen_time_str():
    import time
    return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
class ProxyNetworkActivate():
    """
    This defines an empty context manager, named TempProxy here, used to route a short section of code through a proxy
    """
@@ -916,15 +847,18 @@ class ProxyNetworkActivate():
        if 'HTTPS_PROXY' in os.environ: os.environ.pop('HTTPS_PROXY')
        return
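
Typical use, as a sketch: the elided __enter__ presumably exports HTTP_PROXY/HTTPS_PROXY from the configured proxy, and the __exit__ shown above removes them again, so only the wrapped block goes through the proxy:

    with ProxyNetworkActivate():
        import requests
        r = requests.get('https://example.com', timeout=30)  # proxied request
    # outside the with-block the proxy environment variables are cleared again
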
def objdump(obj, file='objdump.tmp'):
    import pickle
    with open(file, 'wb+') as f:
        pickle.dump(obj, f)
    return


def objload(file='objdump.tmp'):
    import pickle, os
    if not os.path.exists(file):
        return
    with open(file, 'rb') as f:
        return pickle.load(f)
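
Round-trip example (pickle is not safe on untrusted input, so only load files this process wrote itself):

    objdump({'history': ['hello'], 'model': 'gpt-3.5-turbo'})
    state = objload()            # -> {'history': ['hello'], 'model': 'gpt-3.5-turbo'}
    nothing = objload('absent')  # -> None when the file does not exist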