Rework the file-upload logic: stop deleting uploads and keep each user's files | add retrieval of a user's upload history
__main__.py  (56 changed lines)

@@ -1,9 +1,11 @@
 import os
 
 os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
 import gradio as gr
 from request_llm.bridge_chatgpt import predict
-from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, \
-    DummyWith
+from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_user_upload, get_conf, \
+    ArgsGeneralWrapper, DummyWith
+
+
 # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
 proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \
@@ -61,7 +63,7 @@ if LAYOUT == "TOP-DOWN":
     CHATBOT_HEIGHT /= 2
 
 cancel_handles = []
-with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
+with gr.Blocks(title="ChatGPT For Tester", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
     gr.HTML(title_html)
     cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
     with gr_L1():
@@ -76,6 +78,7 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
             status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
 
         with gr_L2(scale=1):
+            with gr.Tab('对话模式'):
                 with gr.Accordion("输入区", open=True) as area_input_primary:
                     with gr.Row():
                         txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
@@ -94,10 +97,14 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
                             variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                             functional[k]["Button"] = gr.Button(k, variant=variant)
             with gr.Tab('Public'):
-                with gr.Row():
-                    with gr.Accordion("点击展开“文件上传区”。上传本地文件可供高亮函数插件调用。",
+                with gr.Box():
+                    with gr.Accordion("上传本地文件可供高亮函数插件调用",
                                       open=False) as area_file_up:
-                        file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
+                        file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)",
+                                               file_count="multiple")
+                        file_upload.style()
+                    with gr.Row():
+                        upload_history = submitBtn = gr.Button("Get Upload History", variant="primary")
                 with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
                     with gr.Row():
                         gr.Markdown("注意:以下“高亮”标识的函数插件需从输入区读取路径作为参数.")
@@ -109,7 +116,8 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
                             crazy_fns[k]["Button"].style(size="sm")
                     with gr.Row():
                         with gr.Accordion("更多函数插件", open=True):
-                            dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
+                            dropdown_fn_list = [k for k in crazy_fns.keys() if
+                                                not crazy_fns[k].get("AsButton", True)]
                             with gr.Column(scale=1):
                                 dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(
                                     container=False)
@@ -124,7 +132,8 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
                                       label="Top-p (nucleus sampling)", )
                 temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True,
                                         label="Temperature", )
-                max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
+                max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True,
+                                          label="MaxLength", )
 
                 models_box = gr.CheckboxGroup(["input加密"],
                                               value=["input加密"], label="对话模式")
@@ -132,11 +141,33 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
                     container=False)
 
             gr.Markdown(description)
+            with gr.Tab('Auto-GPT'):
+                with gr.Row():
+                    ai_name = gr.Textbox(show_label=False, placeholder="Give AI a name.").style(container=False)
+                with gr.Row():
+                    user_input = gr.Textbox(lines=5, show_label=False, placeholder="Describe your AI's role.").style(
+                        container=False)
+                with gr.Box():
+                    with gr.Row() as goal_list:
+                        goal_array = []
+                        for text in range(4):
+                            goal_array.append(gr.Textbox(show_label=False, placeholder="Enter up to 1 goals.").style(container=False))
+                    with gr.Row():
+                        submit_add = gr.Button("Adding goals", variant="secondary")
+                    with gr.Row():
+                        __l = [str(i) for i in range(10, 101, 10)]
+                        __l.insert(0, '1')
+                        submit_numer = gr.Dropdown(__l, value='1', interactive=True, label='Number of Next').style(
+                            container=False)
+                    with gr.Row():
+                        submit_next = gr.Button("Next", variant="primary")
+                        submit_auto = gr.Button("Continuous", variant="secondary")
+                        submit_stop = gr.Button("Stop", variant="stop")
 
-    # 整理反复出现的控件句柄组合
+    # 整理反复出现的控件句柄组合,
     # submitBtn.info
-    input_combo = [cookies, max_length_sl, md_dropdown, txt, top_p, temperature, chatbot, history, system_prompt, models_box]
+    input_combo = [cookies, max_length_sl, md_dropdown, txt, top_p, temperature, chatbot, history, system_prompt,
+                   models_box]
     output_combo = [cookies, chatbot, history, status]
     predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
     # 提交按钮、重置按钮
@@ -151,6 +182,7 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
         cancel_handles.append(click_handle)
     # 文件上传区,接收文件后与chatbot的互动
    file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
+    upload_history.click(get_user_upload, [chatbot], outputs=[])
    # 函数插件-固定按钮区
    for k in crazy_fns:
        if not crazy_fns[k].get("AsButton", True): continue
@@ -164,8 +196,6 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
        def on_dropdown_changed(k):
            variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
            return {switchy_bt: gr.update(value=k, variant=variant)}
 
 
        dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt])
-
-
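For readers unfamiliar with the Gradio mechanism the new handlers lean on: when a callback declares a parameter annotated with gr.Request (as get_user_upload and the reworked on_file_uploaded now do), Gradio injects the incoming request automatically, and request.client.host yields the caller's IP, which is what keys the per-user upload folders. A minimal, self-contained sketch of that pattern follows; the component and function names are hypothetical and not taken from this repository.

import gradio as gr

def whoami(chatbot, request: gr.Request):
    # Gradio fills 'request' automatically because of the gr.Request annotation;
    # no extra input component is needed for it.
    client_ip = request.client.host
    return chatbot + [["Who am I?", f"[Local Message] Uploads for this session are keyed by IP: {client_ip}"]]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    check_btn = gr.Button("Check my upload namespace")
    # The return value only reaches the UI if the component is listed in outputs;
    # with outputs=[] the chatbot would not refresh.
    check_btn.click(whoami, inputs=[chatbot], outputs=[chatbot])

if __name__ == "__main__":
    demo.launch()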
auto_functional.py  (new file, 6 lines)

@@ -0,0 +1,6 @@
+#! .\venv\
+# encoding: utf-8
+# @Time   : 2023/4/20
+# @Author : Spike
+# @Descr  :
+
(changes to a further file; the file name was not captured in this view)

@@ -19,9 +19,9 @@ def get_crazy_functions():
     from crazy_functions.解析项目源代码 import 解析一个Lua项目
     from crazy_functions.解析项目源代码 import 解析一个CSharp项目
     from crazy_functions.总结word文档 import 总结word文档
-    from crazy_functions.三千问 import 猜你想问
+    from crazy_functions.辅助回答 import 猜你想问
     function_plugins = {
-        "三千问:猜你想问": {
+        "猜你想问": {
             "Function": HotReload(猜你想问)
         },
         "解析整个Python项目": {
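For orientation, and offered as an illustration rather than a quote from the repository: get_crazy_functions() returns a dict whose keys become the plugin labels in __main__.py and whose "Function" entries are the callables bound to them; entries with "AsButton": False are collected into the "更多函数插件" dropdown instead of getting their own button (see the crazy_fns[k].get("AsButton", True) checks in the __main__.py hunks above). A made-up pair of entries shaped like the one this hunk renames:

# Hypothetical plugin entries, mirroring the dict shape edited in the hunk above.
from toolbox import HotReload                    # live-reload wrapper used by the project
from crazy_functions.辅助回答 import 猜你想问      # renamed import from this commit

function_plugins = {
    "猜你想问": {
        "Function": HotReload(猜你想问),          # shown as a button by default
    },
    "示例插件(仅作说明)": {                        # invented entry, not in the repository
        "AsButton": False,                        # goes to the "更多函数插件" dropdown instead
        "Function": HotReload(猜你想问),
    },
}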
(changes to a further file; the file name was not captured in this view)

@@ -51,9 +51,10 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
         )
         iteration_results.append(gpt_say)
         last_iteration_result = gpt_say
 
     ############################## <第 3 步,整理history> ##################################
     final_results.extend(iteration_results)
+    # 将摘要添加到历史中,方便"猜你想问"使用
+    history.extend([last_iteration_result])
     final_results.append(f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。')
     # 接下来两句话只显示在界面上,不起实际作用
     i_say_show_user = f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。'; gpt_say = "[Local Message] 收到。"
test.py  (30 changed lines)

@@ -20,6 +20,32 @@ with gr.Blocks() as demo:
     btn = gr.Button(value="Submit")
     btn.click(sentence_builder, inputs=[txt, txt_2], outputs=[txt_3])
 
-if __name__ == "__main__":
-    demo.launch()
+class ChatGPTForTester:
+
+    def __init__(self):
+        self.demo = gr.Blocks()
+
+    def book(self):
+        with self.demo:
+            txt = gr.Textbox(label="Input", lines=2)
+            txt_2 = gr.CheckboxGroup(['USA', "Japan"], value=['USA'], label='你好呀')
+            txt_3 = gr.Textbox(value="", label="Output")
+            btn = gr.Button(value="Submit")
+            btn.click(sentence_builder, inputs=[txt, txt_2], outputs=[txt_3])
+
+    def book2(self):
+        with self.demo:
+            txt = gr.Textbox(label="Input", lines=2)
+            txt_2 = gr.CheckboxGroup(['USA', "Japan"], value=['USA'], label='我好呀')
+            txt_3 = gr.Textbox(value="", label="Output")
+            btn = gr.Button(value="Submit")
+            btn.click(sentence_builder, inputs=[txt, txt_2], outputs=[txt_3])
+
+    def main(self):
+        self.book2()
+        self.book()
+        self.demo.launch()
+
+
+if __name__ == "__main__":
+    ChatGPTForTester().main()
toolbox.py  (49 changed lines)

@@ -8,6 +8,10 @@ import func_box
 from latex2mathml.converter import convert as tex2mathml
 from functools import wraps, lru_cache
 import logging
+import shutil
+import os
+import time
+import glob
 ############################### 插件输入输出接驳区 #######################################
 class ChatBotWithCookies(list):
     def __init__(self, cookie):
@@ -378,29 +382,32 @@ def find_recent_files(directory):
     return recent_files
 
 
-def on_file_uploaded(files, chatbot, txt):
+def get_user_upload(chatbot, ipaddr: gr.Request):
+    private_upload = './private_upload'
+    user_history = os.path.join(private_upload, ipaddr.client.host)
+    history = ''
+    for root, d, file in os.walk(private_upload):
+        history += f'目录:{root} 文件: {file}\n'
+    chatbot.append(['我检查了之前上传的文件: ',
+                    '[Local Message] 请自行复制以下目录or目录/文件, 供以高亮插件使用\n'
+                    f'{history}'
+                    ])
+
+
+def on_file_uploaded(files, chatbot, txt, ipaddr: gr.Request):
     if len(files) == 0:
         return chatbot, txt
-    import shutil
-    import os
-    import time
-    import glob
-    from toolbox import extract_archive
-    try:
-        shutil.rmtree('./private_upload/')
-    except:
-        pass
-    time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
-    os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
+    private_upload = './private_upload'
+    # shutil.rmtree('./private_upload/') 不需要删除文件
+    time_tag_path = os.path.join(private_upload, ipaddr.client.host, time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()))
+    os.makedirs(f'{time_tag_path}', exist_ok=True)
     err_msg = ''
     for file in files:
         file_origin_name = os.path.basename(file.orig_name)
-        shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
-        err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
-                                   dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract')
-    moved_files = [fp for fp in glob.glob(
-        'private_upload/**/*', recursive=True)]
-    txt = f'private_upload/{time_tag}'
+        shutil.copy(file.name, f'{time_tag_path}/{file_origin_name}')
+        err_msg += extract_archive(f'{time_tag_path}/{file_origin_name}',
+                                   dest_dir=f'{time_tag_path}/{file_origin_name}.extract')
+    moved_files = [fp for fp in glob.glob(f'{time_tag_path}/**/*', recursive=True)]
+    txt = f'{time_tag_path}'
     moved_files_str = '\t\n\n'.join(moved_files)
     chatbot.append(['我上传了文件,请查收',
                     f'[Local Message] 收到以下文件: \n\n{moved_files_str}' +
@@ -510,3 +517,9 @@ class DummyWith():
 
     def __exit__(self, exc_type, exc_value, traceback):
         return
+
+
+if __name__ == '__main__':
+    private_upload = './private_upload'
+    for r, d, f in os.walk(private_upload):
+        print(r, f)
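Taken together, the toolbox.py changes replace the old behaviour (wipe ./private_upload and rebuild a single timestamped folder on every upload) with a persistent per-client tree, and get_user_upload walks that tree so the paths can be pasted back into the input box for the highlighted plugins. Below is a rough sketch of the resulting layout plus a variant listing scoped to the requesting IP only; the addresses and file names are invented, and the helper is illustrative, not part of the commit.

# Illustrative layout written by the new on_file_uploaded (names are made up):
#
# ./private_upload/
# ├── 192.168.1.15/
# │   ├── 2023-04-20-10-31-05/
# │   │   ├── paper.zip
# │   │   └── paper.zip.extract/ ...
# │   └── 2023-04-21-09-02-44/ ...
# └── 10.0.0.7/
#     └── 2023-04-20-18-12-30/ ...

import os

def list_user_uploads(client_ip: str, root: str = './private_upload') -> str:
    """Sketch: report only the requesting client's history, assuming the
    <root>/<client-ip>/<timestamp>/ layout created by on_file_uploaded."""
    listing = ''
    for r, _dirs, files in os.walk(os.path.join(root, client_ip)):
        listing += f'目录:{r} 文件: {files}\n'
    return listing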