增加段落markdown 样式 (Add paragraph Markdown styling)
149 __main__.py
@@ -21,7 +21,7 @@ crazy_fns = get_crazy_functions()
gr.Chatbot.postprocess = format_io
# 做一些外观色彩上的调整
from theme import adjust_theme, advanced_css, custom_css
from theme import adjust_theme, advanced_css, custom_css, small_and_beautiful_theme
set_theme = adjust_theme()

@@ -75,56 +75,16 @@ class ChatBot(ChatBotFrame):
self.chatbot = gr.Chatbot(elem_id='main_chatbot', label=f"当前模型:{LLM_MODEL}")
self.chatbot.style()
self.history = gr.State([])
temp_draw = [gr.HTML() for i in range(7)]
with gr.Box(elem_id='chat_box'):
with gr.Row():
gr.Button(elem_classes='sm_btn').style(size='sm', full_width=False)
gr.HTML(func_box.get_html("appearance_switcher.html").format(label=""), elem_id='user_input_tb', elem_classes="insert_block")
with gr.Row():
self.txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_classes='chat_input').style(container=False)
self.input_copy = gr.State('')
self.submitBtn = gr.Button("", variant="primary", elem_classes='submit_btn').style(full_width=False)
temp_draw = [gr.HTML() for i in range(4)]
with gr.Row():
self.status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行\n {proxy_info}", elem_id='debug_mes')
def draw_examples(self):
with gr.Column(elem_id='examples_col') as self.examples_column:
gr.Markdown('# Get Started Quickly')
with gr.Row():
hide_components = gr.Textbox(visible=False)
gr.Button.update = func_box.update_btn
self.example = [['今天伦敦天气怎么样?', '对2021年以后的世界和事件了解有限', self.submitBtn.update(elem_id='highlight_update')],
['今夕何夕,明月何月?', '偶尔会产生不正确的信息', self.submitBtn.update(elem_id='highlight_update')],
['怎么才能把学校给炸了?', '经过训练,会拒绝不适当的请求', self.submitBtn.update(elem_id='highlight_update')]]
self.example_inputs = [self.txt, hide_components, self.submitBtn]
self.guidance_example = gr.Examples(examples=self.example, inputs=self.example_inputs, label='基础对话')
self.guidance_plugins = gr.Dataset(components=[gr.HTML(visible=False)], samples=[['...'] for i in range(4)], label='高级功能', type='index')
self.guidance_plugins_state = gr.State()
self.guidance_news = gr.Examples(examples=func_box.git_log_list(), inputs=[hide_components, hide_components], label='News')
def plug_update(index, date_set):
variant = crazy_fns[date_set[index]]["Color"] if "Color" in crazy_fns[date_set[index]] else "secondary"
ret = {self.switchy_bt: self.switchy_bt.update(value=date_set[index], variant=variant, elem_id='highlight_update'),
self.tabs_inputs: gr.Tabs.update(selected='plug_tab'),
self.area_crazy_fn: self.area_crazy_fn.update(open=True)}
fns_value = func_box.txt_converter_json(str(crazy_fns[date_set[index]].get('Parameters', '')))
fns_lable = f"插件[{date_set[index]}]的高级参数说明:\n" + crazy_fns[date_set[index]].get("ArgsReminder", f"没有提供高级参数功能说明")
temp_dict = dict(visible=True, interactive=True, value=str(fns_value), label=fns_lable)
# 是否唤起高级插件参数区
if crazy_fns[date_set[index]].get("AdvancedArgs", False):
ret.update({self.plugin_advanced_arg: gr.update(**temp_dict)})
ret.update({self.area_crazy_fn: self.area_crazy_fn.update(open=False)})
else:
ret.update({self.plugin_advanced_arg: gr.update(visible=False, label=f"插件[{date_set[index]}]不需要高级参数。")})
return ret
self.guidance_plugins.select(fn=plug_update, inputs=[self.guidance_plugins, self.guidance_plugins_state],
outputs=[self.switchy_bt, self.plugin_advanced_arg, self.tabs_inputs,
self.area_crazy_fn])
self.txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='chat_txt').style(container=False)
self.input_copy = gr.State('')
self.submitBtn = gr.Button("提交", variant="primary", visible=False).style(full_width=False)
with gr.Row(elem_id='debug_mes'):
self.status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
def __clear_input(self, inputs):
return '', inputs, self.examples_column.update(visible=False)
return '', inputs
def draw_prompt(self):
with gr.Row():
@@ -132,18 +92,17 @@ class ChatBot(ChatBotFrame):
container=False)
self.pro_entry_btn = gr.Button("搜索", variant="primary").style(full_width=False, size="sm")
with gr.Row():
with gr.Accordion(label='Prompt usage frequency'):
self.pro_prompt_list = gr.Dataset(components=[gr.HTML(visible=False)], samples_per_page=10,
label='Results',
samples=[[". . ."] for i in range(20)], type='index')
self.pro_prompt_state = gr.State(self.pro_prompt_list)
self.pro_prompt_list = gr.Dataset(components=[gr.HTML(visible=False)], samples_per_page=10,
label="Prompt usage frequency",
samples=[[". . ."] for i in range(20)], type='index')
self.pro_prompt_state = gr.State(self.pro_prompt_list)
def draw_temp_edit(self):
with gr.Box():
with gr.Row():
with gr.Column(scale=100):
self.pro_results = gr.Chatbot(label='Prompt and result', elem_id='prompt_result').style()
with gr.Column(scale=16):
with gr.Column(scale=11):
Tips = "用 BORF 分析法设计chat GPT prompt:\n" \
"1、阐述背景 B(Background): 说明背景,为chatGPT提供充足的信息\n" \
"2、定义目标 O(Objectives):“我们希望实现什么”\n" \

@@ -187,14 +146,18 @@ class ChatBot(ChatBotFrame):
self.pro_func_prompt, self.pro_fp_state])
self.pro_reuse_btn.click(
fn=func_box.reuse_chat,
inputs=[self.pro_results, self.chatbot, self.history, self.pro_name_txt, self.txt],
outputs=[self.chatbot, self.history, self.txt, self.tabs_chatbot, self.pro_name_txt, self.examples_column]
inputs=[self.pro_results, self.chatbot, self.history, self.pro_name_txt],
outputs=[self.chatbot, self.history, self.txt, self.tabs_chatbot, self.pro_name_txt]
)
def draw_function_chat(self):
prompt_list, devs_document = get_conf('prompt_list', 'devs_document')
with gr.TabItem('Function', id='func_tab'):
with gr.Accordion("基础功能区", open=False) as self.area_basic_fn:
with gr.Row():
# self.cpopyBtn = gr.Button("复制回答", variant="secondary").style(size="sm")
self.resetBtn = gr.Button("新建对话", variant="secondary", elem_id='empty_btn').style(size="sm")
self.stopBtn = gr.Button("中止对话", variant="stop").style(size="sm")
with gr.Tab('Function'):
with gr.Accordion("基础功能区", open=True) as self.area_basic_fn:
with gr.Row():
for k in functional:
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"

@@ -206,7 +169,7 @@ class ChatBot(ChatBotFrame):
label=f'上传你的Prompt文件, 编写格式请遵循上述开发者文档', )
self.pro_private_check = gr.CheckboxGroup(choices=prompt_list['key'], value=prompt_list['value'],
label='选择展示Prompt')
self.pro_func_prompt = gr.Dataset(components=[gr.HTML()], label="Prompt List", visible=False,
self.pro_func_prompt = gr.Dataset(components=[gr.HTML()], label="All Prompt", visible=False,
samples=[['...', ""] for i in range(20)], type='index',
samples_per_page=10)
self.pro_fp_state = gr.State(self.pro_func_prompt)

@@ -226,7 +189,7 @@ class ChatBot(ChatBotFrame):
self.prompt_tab.select(fn=lambda: 1, inputs=None, outputs=self.tabs_code)
def draw_public_chat(self):
with gr.TabItem('Plugins', id='plug_tab'):
with gr.Tab('Plugins'):
with gr.Accordion("上传本地文件可供高亮函数插件调用", open=False) as self.area_file_up:
self.file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)",
file_count="multiple")
@@ -241,23 +204,23 @@ class ChatBot(ChatBotFrame):
self.variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
crazy_fns[k]["Button"] = gr.Button(k, variant=self.variant)
crazy_fns[k]["Button"].style(size="sm")
with gr.Accordion("更多函数插件/高级用法", open=True):
dropdown_fn_list = []
for k in crazy_fns.keys():
if not crazy_fns[k].get("AsButton", True):
dropdown_fn_list.append(k)
elif crazy_fns[k].get('AdvancedArgs', False):
dropdown_fn_list.append(k)
self.dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(
container=False)
self.plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
self.switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
with gr.Accordion("更多函数插件/高级用法", open=True):
dropdown_fn_list = []
for k in crazy_fns.keys():
if not crazy_fns[k].get("AsButton", True):
dropdown_fn_list.append(k)
elif crazy_fns[k].get('AdvancedArgs', False):
dropdown_fn_list.append(k)
self.dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(
container=False)
self.plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
self.switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
def draw_setting_chat(self):
switch_model = get_conf('switch_model')[0]
with gr.TabItem('Settings', id='sett_tab'):
with gr.Tab('Settings'):
self.top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True,
label="Top-p (nucleus sampling)", ).style(container=False)
self.temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True,

@@ -307,13 +270,12 @@ class ChatBot(ChatBotFrame):
self.system_prompt, self.models_box, self.plugin_advanced_arg]
self.output_combo = [self.cookies, self.chatbot, self.history, self.status]
self.predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=self.input_combo, outputs=self.output_combo)
self.clear_agrs = dict(fn=self.__clear_input, inputs=[self.txt], outputs=[self.txt, self.input_copy,
self.examples_column])
self.clear_agrs = dict(fn=self.__clear_input, inputs=[self.txt], outputs=[self.txt, self.input_copy])
# 提交按钮、重置按钮
self.cancel_handles.append(self.txt.submit(**self.clear_agrs).then(**self.predict_args))
self.cancel_handles.append(self.submitBtn.click(**self.clear_agrs).then(**self.predict_args))
# self.cpopyBtn.click(fn=func_box.copy_result, inputs=[self.history], outputs=[self.status])
self.resetBtn.click(lambda: ([], [], "已重置"), None, [self.chatbot, self.history, self.status], _js='()=>{clearHistoryHtml();}')
self.resetBtn.click(lambda: ([], [], "已重置"), None, [self.chatbot, self.history, self.status])
def signals_function(self):
# 基础功能区的回调函数注册

@@ -334,8 +296,8 @@ class ChatBot(ChatBotFrame):
if not crazy_fns[k].get("AsButton", True): continue
self.click_handle = crazy_fns[k]["Button"].click(**self.clear_agrs).then(
ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*self.input_combo, gr.State(PORT)], self.output_combo)
self.click_handle.then(on_report_generated, [self.cookies, self.file_upload, self.chatbot],
[self.cookies, self.file_upload, self.chatbot])
self.click_handle.then(on_report_generated, [self.file_upload, self.chatbot],
[self.file_upload, self.chatbot])
# self.click_handle.then(fn=lambda x: '', inputs=[], outputs=self.txt)
self.cancel_handles.append(self.click_handle)

@@ -343,7 +305,7 @@ class ChatBot(ChatBotFrame):
def on_dropdown_changed(k):
# 按钮颜色随变
variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
ret = {self.switchy_bt: self.switchy_bt.update(value=k, variant=variant)}
ret = {self.switchy_bt: gr.update(value=k, variant=variant)}
# 参数取随变
fns_value = func_box.txt_converter_json(str(crazy_fns[k].get('Parameters', '')))
fns_lable = f"插件[{k}]的高级参数说明:\n" + crazy_fns[k].get("ArgsReminder", f"没有提供高级参数功能说明")
@@ -354,20 +316,16 @@ class ChatBot(ChatBotFrame):
else:
ret.update({self.plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")})
return ret
self.dropdown.select(on_dropdown_changed, [self.dropdown], [self.switchy_bt, self.plugin_advanced_arg])
# 随变按钮的回调函数注册
def route(k, ipaddr: gr.Request, *args, **kwargs):
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
append = list(args)
append[-2] = func_box.txt_converter_json(append[-2])
append.insert(-1, ipaddr)
args = tuple(append)
yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
self.click_handle = self.switchy_bt.click(**self.clear_agrs).then(route, [self.switchy_bt, *self.input_combo, gr.State(PORT)], self.output_combo)
self.click_handle.then(on_report_generated, [self.cookies, self.file_upload, self.chatbot], [self.cookies, self.file_upload, self.chatbot])
self.click_handle.then(on_report_generated, [self.file_upload, self.chatbot], [self.file_upload, self.chatbot])
self.cancel_handles.append(self.click_handle)
# 终止按钮的回调函数注册
self.stopBtn.click(fn=None, inputs=None, outputs=None, cancels=self.cancel_handles)

@@ -378,12 +336,13 @@ class ChatBot(ChatBotFrame):
self.md_dropdown.select(on_md_dropdown_changed, [self.md_dropdown], [self.chatbot])
def signals_auto_input(self):
from autogpt.cli import agent_main
self.auto_input_combo = [self.ai_name, self.ai_role, self.ai_goal_list, self.ai_budget,
self.cookies, self.chatbot, self.history,
self.agent_obj]
self.auto_output_combo = [self.cookies, self.chatbot, self.history, self.status,
self.agent_obj, self.submit_start, self.submit_next, self.text_continue]
self.submit_start.click(fn=agent_main, inputs=self.auto_input_combo, outputs=self.auto_output_combo)
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
def auto_opentab_delay(self, is_open=False):

@@ -410,19 +369,13 @@ class ChatBot(ChatBotFrame):
# 绘制一个ROW,row会让底下的元素自动排成一行
with gr.Row().style(justify='between'):
# 绘制列1
with gr.Column(scale=44):
with gr.Column(scale=46):
with gr.Tabs() as self.tabs_copilot:
# 绘制对话模组
with gr.TabItem('Chat-Copilot'):
with gr.Row():
# self.cpopyBtn = gr.Button("复制回答", variant="secondary").style(size="sm")
self.resetBtn = gr.Button("新建对话", variant="primary", elem_id='empty_btn').style(
size="sm")
self.stopBtn = gr.Button("中止对话", variant="stop").style(size="sm")
with gr.Tabs() as self.tabs_inputs:
self.draw_function_chat()
self.draw_public_chat()
self.draw_setting_chat()
self.draw_function_chat()
self.draw_public_chat()
self.draw_setting_chat()
# 绘制autogpt模组
with gr.TabItem('Auto-GPT'):

@@ -439,7 +392,6 @@ class ChatBot(ChatBotFrame):
with self.chat_tab: # 使用 gr.State()对组件进行拷贝时,如果之前绘制了Markdown格式,会导致启动崩溃,所以将 markdown相关绘制放在最后
self.draw_chatbot()
self.draw_examples()
with self.prompt_tab:
self.draw_temp_edit()
# 函数注册,需要在Blocks下进行

@@ -449,10 +401,7 @@ class ChatBot(ChatBotFrame):
self.signals_public()
self.signals_prompt_edit()
# self.signals_auto_input()
adv_plugins = gr.State([i for i in crazy_fns])
self.demo.load(fn=func_box.refresh_load_data, postprocess=False,
inputs=[self.chatbot, self.history, self.pro_fp_state, adv_plugins],
outputs=[self.pro_func_prompt, self.pro_fp_state, self.chatbot, self.history, self.guidance_plugins, self.guidance_plugins_state])
self.demo.load(fn=func_box.refresh_load_data, postprocess=False, inputs=[self.chatbot, self.history, self.pro_fp_state], outputs=[self.pro_func_prompt, self.pro_fp_state, self.chatbot, self.history, ])
# Start
self.auto_opentab_delay()
@@ -175,7 +175,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
except: max_workers = 8
if max_workers <= 0: max_workers = 3
# 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-')):
if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-') or llm_kwargs['llm_model'].startswith('proxy-gpt')):
max_workers = 1
executor = ThreadPoolExecutor(max_workers=max_workers)

@@ -184,7 +184,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
chatbot.append([None, ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
# 跨线程传递
mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
mutable = [[f"", time.time(), "等待中"] for _ in range(n_frag)]
# 子线程任务
def _req_gpt(index, inputs, history, sys_prompt):

@@ -272,7 +272,8 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
observe_win.append(print_something_really_funny)
# 在前端打印些好玩的东西
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
stat_str = ''.join([f'`{inputs_show_user_array[thread_index][0:5]}...{inputs_show_user_array[thread_index][-5:]}`\t'
f'`{mutable[thread_index][2]}`: {obs}\n\n'
if not done else f'`{mutable[thread_index][2]}`\n\n'
for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
# 在前端打印些好玩的东西
@@ -11,50 +11,41 @@
mspace {
display: block;
}
@keyframes highlight {
0%, 100% {
border: 2px solid transparent;
}
50% {
border-color: yellow;
}
.gradio-container-3-32-2 h1 {
font-weight: 700 !important;
font-size: 28px !important;
}
#highlight_update {
animation-name: highlight;
animation-duration: 0.75s;
animation-iteration-count: 3;
.gradio-container-3-32-2 h2 {
font-weight: 600 !important;
font-size: 24px !important;
}
.table-wrap.svelte-13hsdno.svelte-13hsdno.svelte-13hsdno {
border: 0px solid var(--border-color-primary) !important;
.gradio-container-3-32-2 h3 {
font-weight: 500 !important;
font-size: 20px !important;
}
#examples_col {
z-index: 2;
.gradio-container-3-32-2 h4 {
font-weight: 400 !important;
font-size: 16px !important;
}
.gradio-container-3-32-2 h5 {
font-weight: 300 !important;
font-size: 14px !important;
}
.gradio-container-3-32-2 h6 {
font-weight: 200 !important;
font-size: 12px !important;
}
#debug_mes {
position: absolute;
bottom: 0;
left: 0;
width: 100%;
margin-bottom: 20% !important;
}
#hide_examples {
z-index: 0;
}
#debug_mes {
position: absolute;
display: flex;
bottom: 0;
left: 0;
z-index: 1; /* 设置更高的 z-index 值 */
margin-bottom: -4px !important;
align-self: flex-end;
margin-bottom: 10px !important;
}
#chat_box {
#chat_txt {
display: flex;
flex-direction: column;
flex-direction: column-reverse;
overflow-y: auto !important;
z-index: 3;
flex-grow: 1; /* 自动填充剩余空间 */
@@ -62,38 +53,8 @@ mspace {
bottom: 0;
left: 0;
width: 100%;
margin-bottom: 30px !important;
border: 1px solid var(--border-color-primary);
margin-bottom: 35px !important;
}
.chat_input {
}
.sm_btn {
position: relative;
bottom: 5px;
height: 10%;
border-radius: 20px!important;
min-width: min(10%,100%) !important;
}
/* usage_display */
.insert_block {
position: relative;
bottom: 2px;
min-width: min(110px,100%) !important;
}
.submit_btn {
flex-direction: column-reverse;
overflow-y: auto !important;
position: absolute;
bottom: 0;
right: 10px;
margin-bottom: 10px !important;
min-width: min(50px,100%) !important;
}
#sm_btn {
display: flex;
flex-wrap: unset !important;

@@ -162,25 +123,10 @@ footer {
background: var(--color-accent);
padding: var(--block-label-padding);
font-size: var(--block-label-text-size); line-height: var(--line-sm);
width: auto; min-height: 30px !important;
width: auto; min-height: 30px!important;
opacity: 1;
transition: opacity 0.3s ease-in-out;
}
textarea.svelte-1pie7s6 {
background: #e7e6e6 !important;
}
.dark textarea.svelte-1pie7s6 {
background: var(--input-background-fill) !important;
}
.dark input[type=number].svelte-1cl284s {
background: #393939 !important;
border: var(--input-border-width) solid var(--input-border-color) !important;
}
.dark input[type="range"] {
background: #393939 !important;
}
#user_info .wrap {
opacity: 0;
}
@@ -196,29 +142,31 @@ textarea.svelte-1pie7s6 {
gap: 7px !important;
border-radius: var(--radius-xl) !important
}
/* debug_mes */
#debug_mes {
/* status_display */
#status_display {
display: flex;
min-height: 2em;
align-items: flex-end;
justify-content: flex-end;
}
#debug_mes p {
#status_display p {
font-size: .85em;
font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace;
/* Windows下中文的monospace会fallback为新宋体,实在太丑,这里折中使用微软雅黑 */
color: #000000;
}
.dark #debug_mes p {
color: #ee65ed;
color: var(--body-text-color-subdued);
}
#debug_mes {
#status_display {
transition: all 0.6s;
}
#main_chatbot {
transition: height 0.3s ease;
}
span.svelte-1gfkn6j {
background: unset !important;
}
.wrap.svelte-18telvq.svelte-18telvq {
padding: var(--block-padding) !important;
height: 100% !important;

@@ -234,34 +182,20 @@ textarea.svelte-1pie7s6 {
height: 100%;
}
.gradio-container-3-32-2 h1 {
font-weight: 700 !important;
font-size: 28px !important;
/* usage_display */
.insert_block {
position: relative;
margin: 0;
padding: .5em 1em;
box-shadow: var(--block-shadow);
border-width: var(--block-border-width);
border-color: var(--block-border-color);
border-radius: var(--block-radius);
background: var(--block-background-fill);
width: 100%;
line-height: var(--line-sm);
min-height: 2em;
}
.gradio-container-3-32-2 h2 {
font-weight: 600 !important;
font-size: 24px !important;
}
.gradio-container-3-32-2 h3 {
font-weight: 500 !important;
font-size: 20px !important;
}
.gradio-container-3-32-2 h4 {
font-weight: 400 !important;
font-size: 16px !important;
}
.gradio-container-3-32-2 h5 {
font-weight: 300 !important;
font-size: 14px !important;
}
.gradio-container-3-32-2 h6 {
font-weight: 200 !important;
font-size: 12px !important;
}
#usage_display p, #usage_display span {
margin: 0;
font-size: .85em;
@@ -320,44 +254,6 @@ textarea.svelte-1pie7s6 {
transition: .4s;
content: "🌞";
}
hr.append-display {
margin: 8px 0;
border: none;
height: 1px;
border-top-width: 0;
background-image: linear-gradient(to right, rgba(50,50,50, 0.1), rgba(150, 150, 150, 0.8), rgba(50,50,50, 0.1));
}
.source-a {
font-size: 0.8em;
max-width: 100%;
margin: 0;
display: flex;
flex-direction: row;
flex-wrap: wrap;
align-items: center;
/* background-color: #dddddd88; */
border-radius: 1.5rem;
padding: 0.2em;
}
.source-a a {
display: inline-block;
background-color: #aaaaaa50;
border-radius: 1rem;
padding: 0.5em;
text-align: center;
text-overflow: ellipsis;
overflow: hidden;
min-width: 20%;
white-space: nowrap;
margin: 0.2rem 0.1rem;
text-decoration: none !important;
flex: 1;
transition: flex 0.5s;
}
.source-a a:hover {
background-color: #aaaaaa20;
flex: 2;
}
input:checked + .apSlider {
background-color: var(--primary-600);
}

@@ -400,10 +296,10 @@ input[type=range]::-webkit-slider-runnable-track {
background: transparent;
}
.submit_btn, #cancel_btn {
#submit_btn, #cancel_btn {
height: 42px !important;
}
.submit_btn::before {
#submit_btn::before {
content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E");
height: 21px;
}
@@ -609,9 +505,6 @@ thead th {
color: #FFF;
box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
}
.dark .message pre code {
background-color: hsla(0, 0%, 20%, 300%)!important;
}
.message pre {
padding: 0 !important;
}

118 func_box.py
@@ -32,7 +32,6 @@ import random
import gradio as gr
import toolbox
from prompt_generator import SqliteHandle
from bs4 import BeautifulSoup
"""contextlib 是 Python 标准库中的一个模块,提供了一些工具函数和装饰器,用于支持编写上下文管理器和处理上下文的常见任务,例如资源管理、异常处理等。
官网:https://docs.python.org/3/library/contextlib.html"""

@@ -134,7 +133,7 @@ def html_tag_color(tag, color=None, font='black'):
def html_a_blank(__href, name=''):
if not name:
name = __href
dir_name = __href
a = f'<a href="{__href}" target="_blank" class="svelte-xrr240">{name}</a>'
return a

@@ -146,6 +145,7 @@ def html_download_blank(__href, file_name='temp', dir_name=''):
a = f'<a href="{__href}" target="_blank" download="{dir_name}" class="svelte-xrr240">{file_name}</a>'
return a
def ipaddr():
# 获取本地ipx
ip = psutil.net_if_addrs()

@@ -153,9 +153,6 @@ def ipaddr():
if ip[i][0][3]:
return ip[i][0][1]
def html_local_img(__file):
a = f'<div align="center"><img src="file={__file}"></div>'
return a
def encryption_str(txt: str):
"""(关键字)(加密间隔)匹配机制(关键字间隔)"""

@@ -499,22 +496,16 @@ def show_prompt_result(index, data: gr.Dataset, chatbot):
return chatbot
def pattern_html(html):
bs = BeautifulSoup(html, 'html.parser')
return bs.get_text(separator='')
pattern_markdown = re.compile(r'^<div class="markdown-body"><p>|<\/p><\/div>$')
pattern_markdown_p = re.compile(r'^<div class="markdown-body">|<\/div>$')
def thread_write_chat(chatbot, history):
"""
对话记录写入数据库
"""
private_key = toolbox.get_conf('private_key')[0]
chat_title = chatbot[0][1].split()
i_say = pattern_html(chatbot[-1][0])
if history:
gpt_result = history
else: # 如果历史对话不存在,那么读取对话框
gpt_result = [pattern_html(v) for i in chatbot for v in i]
i_say = pattern_markdown.sub('', chatbot[-1][0])
gpt_result = history
if private_key in chat_title:
SqliteHandle(f'ai_private_{chat_title[-2]}').inset_prompt({i_say: gpt_result})
else:
@@ -522,21 +513,23 @@ def thread_write_chat(chatbot, history):
base_path = os.path.dirname(__file__)
prompt_path = os.path.join(base_path, 'users_data')
prompt_path = os.path.join(base_path, 'prompt_users')
def reuse_chat(result, chatbot, history, pro_numb, say):
def reuse_chat(result, chatbot, history, pro_numb):
"""复用对话记录"""
if result is None or result == []:
return chatbot, history, gr.update(), gr.update(), '', gr.Column.update()
return chatbot, history, gr.update(), gr.update(), ''
else:
if pro_numb:
chatbot += result
history += [pattern_html(_) for i in result for _ in i]
history += [pattern_markdown.sub('', _) for i in result for _ in i]
else:
chatbot.append(result[-1])
history += [pattern_html(_) for i in result[-2:] for _ in i]
history += [pattern_markdown.sub('', _) for i in result[-2:] for _ in i]
print(chatbot[-1][0])
return chatbot, history, say, gr.Tabs.update(selected='chatbot'), '', gr.Column.update(visible=False)
i_say = pattern_markdown.sub('', chatbot[-1][0])
return chatbot, history, i_say, gr.Tabs.update(selected='chatbot'), ''
def num_tokens_from_string(listing: list, encoding_name: str = 'cl100k_base') -> int:

@@ -547,20 +540,20 @@ def num_tokens_from_string(listing: list, encoding_name: str = 'cl100k_base') ->
count_tokens += len(encoding.encode(i))
return count_tokens
def spinner_chatbot_loading(chatbot):
loading = [''.join(['.' * random.randint(1, 5)])]
# 将元组转换为列表并修改元素
loading_msg = copy.deepcopy(chatbot)
temp_list = list(loading_msg[-1])
temp_list[1] = pattern_html(temp_list[1]) + f'{random.choice(loading)}'
if pattern_markdown.match(temp_list[1]):
temp_list[1] = pattern_markdown.sub('', temp_list[1]) + f'{random.choice(loading)}'
else:
temp_list[1] = pattern_markdown_p.sub('', temp_list[1]) + f'{random.choice(loading)}'
# 将列表转换回元组并替换原始元组
loading_msg[-1] = tuple(temp_list)
return loading_msg
def refresh_load_data(chat, history, prompt, crazy_list):
def refresh_load_data(chat, history, prompt):
"""
Args:
chat: 聊天组件

@@ -573,8 +566,7 @@ def refresh_load_data(chat, history, prompt, crazy_list):
is_all = toolbox.get_conf('prompt_list')[0]['key'][0]
data = prompt_retrieval(is_all=[is_all])
prompt.samples = data
selected = random.sample(crazy_list, 4)
return prompt.update(samples=data, visible=True), prompt, chat, history, gr.Dataset.update(samples=[[i] for i in selected]), selected
return prompt.update(samples=data, visible=True), prompt, chat, history
@@ -596,74 +588,6 @@ def clean_br_string(s):
s = re.sub('<\s*br\s*/?>', '\n', s) # 使用正则表达式同时匹配<br>、<br/>、<br />、< br>和< br/>
return s
def update_btn(self,
value: str = None,
variant: str = None,
visible: bool = None,
interactive: bool = None,
elem_id: str = None,
label: str = None
):
if not variant: variant = self.variant
if not visible: visible = self.visible
if not value: value = self.value
if not interactive: interactive = self.interactive
if not elem_id: elem_id = self.elem_id
if not elem_id: label = self.label
return {
"variant": variant,
"visible": visible,
"value": value,
"interactive": interactive,
'elem_id': elem_id,
'label': label,
"__type__": "update",
}
def update_txt(self,
value: str = None,
lines: int = None,
max_lines: int = None,
placeholder: str = None,
label: str = None,
show_label: bool = None,
visible: bool = None,
interactive: bool = None,
type: str = None,
elem_id: str = None
):
return {
"lines": self.lines,
"max_lines": self.max_lines,
"placeholder": self.placeholder,
"label": self.label,
"show_label": self.show_label,
"visible": self.visible,
"value": self.value,
"type": self.type,
"interactive": self.interactive,
"elem_id": elem_id,
"__type__": "update",
}
def get_html(filename):
path = os.path.join(base_path, "docs/assets", "html", filename)
if os.path.exists(path):
with open(path, encoding="utf8") as file:
return file.read()
return ""
def git_log_list():
ll = Shell("git log --pretty=format:'%s | %h' -n 10").read()[1].splitlines()
return [i.split('|') for i in ll if 'branch' not in i][:5]
class YamlHandle:
def __init__(self, file=os.path.join(prompt_path, 'ai_common.yaml')):

@@ -709,4 +633,4 @@ class JsonHandle:
if __name__ == '__main__':
html_local_img("docs/imgs/openai-api-key-billing-paid-account.png")
pass
@@ -130,9 +130,9 @@ model_info = {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": api2d_endpoint,
"max_token": 8192,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
# chatglm

104 toolbox.py
@@ -1,5 +1,3 @@
import html
import markdown
import importlib
import inspect

@@ -14,7 +12,6 @@ import glob
import sys
from concurrent.futures import ThreadPoolExecutor
############################### 插件输入输出接驳区 #######################################
pj = os.path.join
"""
========================================================================

@@ -258,39 +255,16 @@ def report_execption(chatbot, history, a, b):
import re
def text_divide_paragraph(text):
"""
将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
"""
pre = '<div class="markdown-body">'
suf = '</div>'
if text.startswith(pre) and text.endswith(suf):
return text
if '```' in text:
# careful input
return pre + text + suf
else:
# wtf input
lines = text.split("\n")
for i, line in enumerate(lines):
lines[i] = lines[i].replace(" ", " ")
text = "</br>".join(lines)
return pre + text + suf
def text_divide_paragraph(input_str):
if input_str:
# 提取所有的代码块
code_blocks = re.findall(r'```[\s\S]*?```', input_str)
# 将提取到的代码块用占位符替换
for i, block in enumerate(code_blocks):
input_str = input_str.replace(block, f'{{{{CODE_BLOCK_{i}}}}}')
# 判断输入文本是否有反引号
if code_blocks:
# 将非代码块部分的单个换行符替换为双换行符,并处理四个空格的行
sections = re.split(r'({{{{\w+}}}})', input_str)
for idx, section in enumerate(sections):
if 'CODE_BLOCK' in section or section.startswith(' '):

@@ -298,11 +272,9 @@ def text_divide_paragraph(input_str):
sections[idx] = re.sub(r'(?!```)(?<!\n)\n(?!(\n|^)( {0,3}[\*\+\-]|[0-9]+\.))', '\n\n', section)
input_str = ''.join(sections)
# 将占位符替换回原代码块
for i, block in enumerate(code_blocks):
input_str = input_str.replace(f'{{{{CODE_BLOCK_{i}}}}}', block.replace('\n', '\n'))
else:
# 对于没有反引号的字符串,针对四个空格之前的换行符进行处理
lines = input_str.split('\n')
for idx, line in enumerate(lines[:-1]):
if not line.strip():
@@ -319,9 +291,8 @@ def markdown_convertion(txt):
"""
将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
"""
pre = '<div class="md-message">'
pre = '<div class="markdown-body">'
suf = '</div>'
raw_hide = f'<div class="raw-message hideM">%s</div>'
if txt.startswith(pre) and txt.endswith(suf):
# print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
return txt # 已经被转化过,不需要再次转化

@@ -334,6 +305,13 @@ def markdown_convertion(txt):
}
find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
def tex2mathml_catch_exception(content, *args, **kwargs):
try:
content = tex2mathml(content, *args, **kwargs)
except:
content = content
return content
def replace_math_no_render(match):
content = match.group(1)
if 'mode=display' in match.group(0):

@@ -349,10 +327,10 @@ def markdown_convertion(txt):
content = content.replace('\\begin{aligned}', '\\begin{array}')
content = content.replace('\\end{aligned}', '\\end{array}')
content = content.replace('&', ' ')
content = tex2mathml(content, display="block")
content = tex2mathml_catch_exception(content, display="block")
return content
else:
return tex2mathml(content)
return tex2mathml_catch_exception(content)
def markdown_bug_hunt(content):
"""

@@ -363,6 +341,7 @@ def markdown_convertion(txt):
content = content.replace('</script>\n</script>', '</script>')
return content
def no_code(txt):
if '```' not in txt:
return True

@@ -372,24 +351,21 @@ def markdown_convertion(txt):
else:
return False
if ('$$' in txt) and no_code(txt): # 有$标识的公式符号,且没有代码段```的标识
if ('$' in txt) and no_code(txt): # 有$标识的公式符号,且没有代码段```的标识
# convert everything to html format
split = markdown.markdown(text='---')
txt = re.sub(r'\$\$((?:.|\n)*?)\$\$', lambda match: '$$' + re.sub(r'\n+', '</br>', match.group(1)) + '$$', txt)
convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'],
extension_configs=markdown_extension_configs)
convert_stage_1 = markdown_bug_hunt(convert_stage_1)
# re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
# 1. convert to easy-to-copy tex (do not render math)
convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
# 2. convert to rendered equation
convert_stage_1_resp = convert_stage_1.replace('</br>', '')
convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1_resp, flags=re.DOTALL)
convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
# cat them together
context = convert_stage_2_1 + f'{split}' + convert_stage_2_2
return raw_hide.replace('%s', func_box.pattern_html(context)) + pre + context + suf
return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
else:
context = markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists'])
return raw_hide.replace('%s', func_box.pattern_html(context)) + pre + context + suf
return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
def close_up_code_segment_during_stream(gpt_reply):
@@ -425,11 +401,8 @@ def format_io(self, y):
if y is None or y == []:
return []
i_ask, gpt_reply = y[-1]
# 输入部分太自由,预处理一波
if i_ask is not None: i_ask = text_divide_paragraph(i_ask)
# 当代码输出半截的时候,试着补上后个```
if gpt_reply is not None: gpt_reply = close_up_code_segment_during_stream(gpt_reply)
# process
i_ask = text_divide_paragraph(i_ask) # 输入部分太自由,预处理一波
gpt_reply = close_up_code_segment_during_stream(gpt_reply) # 当代码输出半截的时候,试着补上后个```
y[-1] = (
# None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']),
None if i_ask is None else markdown_convertion(i_ask),

@@ -478,7 +451,7 @@ def extract_archive(file_path, dest_dir):
print("Successfully extracted rar archive to {}".format(dest_dir))
except:
print("Rar format requires additional dependencies to install")
return '\n\n解压失败! 需要安装pip install rarfile来解压rar文件'
return '\n\n需要安装pip install rarfile来解压rar文件'
# 第三方库,需要预先pip install py7zr
elif file_extension == '.7z':

@@ -489,7 +462,7 @@ def extract_archive(file_path, dest_dir):
print("Successfully extracted 7z archive to {}".format(dest_dir))
except:
print("7z format requires additional dependencies to install")
return '\n\n解压失败! 需要安装pip install py7zr来解压7z文件'
return '\n\n需要安装pip install py7zr来解压7z文件'
else:
return ''
return ''

@@ -518,17 +491,13 @@ def find_recent_files(directory):
return recent_files
def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
def promote_file_to_downloadzone(file, rename_file=None):
# 将文件复制一份到下载区
import shutil
if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}'
new_path = os.path.join(f'./gpt_log/', rename_file)
if os.path.exists(new_path) and not os.path.samefile(new_path, file): os.remove(new_path)
if not os.path.exists(new_path): shutil.copyfile(file, new_path)
if chatbot:
if 'file_to_promote' in chatbot._cookies: current = chatbot._cookies['file_to_promote']
else: current = []
chatbot._cookies.update({'file_to_promote': [new_path] + current})
if os.path.exists(new_path): os.remove(new_path)
shutil.copyfile(file, new_path)
def get_user_upload(chatbot, ipaddr: gr.Request):

@@ -603,20 +572,16 @@ def on_file_uploaded(files, chatbot, txt, ipaddr: gr.Request):
return chatbot, txt
def on_report_generated(cookies, files, chatbot):
def on_report_generated(files, chatbot):
from toolbox import find_recent_files
if 'file_to_promote' in cookies:
report_files = cookies['file_to_promote']
cookies.pop('file_to_promote')
else:
report_files = find_recent_files('gpt_log')
report_files = find_recent_files('gpt_log')
if len(report_files) == 0:
return None, chatbot
# files.extend(report_files)
file_links = ''
for f in report_files: file_links += f'<br/><a href="file={os.path.abspath(f)}" target="_blank">{f}</a>'
chatbot.append(['报告如何远程获取?', f'报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。{file_links}'])
return cookies, report_files, chatbot
return report_files, chatbot
def is_openai_api_key(key):
API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$", key)

@@ -630,12 +595,11 @@ def is_api2d_key(key):
return False
def is_proxy_key(key):
if key.startswith('proxy-') and len(key) == 38:
if 'proxy' in key:
return True
else:
return False
def is_any_api_key(key):
if ',' in key:
keys = key.split(',')

@@ -657,14 +621,7 @@ def what_keys(keys):
if is_api2d_key(k):
avail_key_list['API2D Key'] += 1
for k in key_list:
if is_proxy_key(k):
avail_key_list['Proxy Key'] += 1
return f"检测到: \n" \
f"OpenAI Key {avail_key_list['OpenAI Key']} 个\n" \
f"API2D Key {avail_key_list['API2D Key']} 个\n" \
f"Proxy Key {avail_key_list['API2D Key']} 个\n"
return f"检测到: OpenAI Key {avail_key_list['OpenAI Key']} 个,API2D Key {avail_key_list['API2D Key']} 个"
def select_api_key(keys, llm_model):
import random

@@ -679,7 +636,7 @@ def select_api_key(keys, llm_model):
for k in key_list:
if is_api2d_key(k): avail_key_list.append(k)
if llm_model.startswith('proxy-'):
if llm_model.startswith('proxy'):
for k in key_list:
if is_proxy_key(k): avail_key_list.append(k.replace('proxy-', ''))

@@ -943,7 +900,6 @@ def zip_result(folder):
import time
t = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
zip_folder(folder, './gpt_log/', f'{t}-result.zip')
return pj('./gpt_log/', f'{t}-result.zip')
def gen_time_str():
import time