另外一种方式导包 (another way to import packages)
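As the title says, this commit switches to another way of importing packages: the hunks below replace intra-package relative imports (from .bridge_chatgpt import ...) with absolute ones (from request_llm.bridge_chatgpt import ...) and add a small self-test block, so the bridge modules can also be executed on their own. A minimal sketch of the difference, using only module names that appear in the hunks (demo.py is a hypothetical file):

    # request_llm/demo.py  (hypothetical module inside the request_llm package)
    # Relative form: valid only when the module is loaded as part of its package,
    # e.g. via `python -m request_llm.demo`; running `python request_llm/demo.py`
    # raises "ImportError: attempted relative import with no known parent package".
    #from .bridge_chatgpt import predict

    # Absolute form: resolves through sys.path, so it works both as a package
    # module and as a script, as long as the repository root is on sys.path
    # (it is when the app is launched with `python __main__.py` from the root).
    from request_llm.bridge_chatgpt import predict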
__main__.py
@@ -1,5 +1,4 @@
-import os;
-
+import os
 os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 (avoid unexpected pollution from the proxy network)
 import gradio as gr
 from request_llm.bridge_chatgpt import predict
@@ -50,7 +49,7 @@ from theme import adjust_theme, advanced_css
 set_theme = adjust_theme()
 
 # 代理与自动更新 (proxy and auto-update)
-from check_proxy import check_proxy, auto_update
+from check_proxy import check_proxy, auto_update, warm_up_modules
 
 proxy_info = check_proxy(proxies)
 
@@ -125,8 +124,7 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
                     label="Top-p (nucleus sampling)", )
                 temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True,
                     label="Temperature", )
-                max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True,
-                    label="Local LLM MaxLength", )
+                max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
 
                 models_box = gr.CheckboxGroup(["input加密", "prompt提示"],
                     value=["input加密", "prompt提示"], label="对话模式")
@@ -167,7 +165,7 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
                          outputs=output_combo)
     cancel_handles.append(click_handle)
     # 文件上传区,接收文件后与chatbot的互动 (file-upload area: interaction with the chatbot after files arrive)
-    file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
+    file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, checkboxes], [chatbot, txt])
     # 函数插件-固定按钮区 (function plugins: fixed-button area)
     for k in crazy_fns:
         if not crazy_fns[k].get("AsButton", True): continue
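Why the extra checkboxes entry matters: gradio passes each input component's current value to the handler positionally, so the inputs list and the handler signature must stay in lockstep, which is exactly what the toolbox.py hunk at the end of this diff does. A minimal self-contained sketch, assuming gradio 3.x and inventing placeholder components:

    import gradio as gr

    def on_file_uploaded(files, chatbot, txt, checkboxes):
        # one parameter per entry in the inputs list, in the same order
        return chatbot, txt

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        txt = gr.Textbox()
        checkboxes = gr.CheckboxGroup(["input加密", "prompt提示"], label="对话模式")
        file_upload = gr.Files(label="upload")
        # the inputs list order defines the handler's positional arguments
        file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, checkboxes], [chatbot, txt])
    # demo.launch()  # uncomment to serve the UI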
@@ -215,6 +213,7 @@ def auto_opentab_delay():
 
     threading.Thread(target=open, name="open-browser", daemon=True).start()
     threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
+    #threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
 
 
 auto_opentab_delay()
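Note that warm_up_modules, newly imported in the earlier hunk, is referenced only by this commented-out thread; until the line is re-enabled the import stays unused.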
request_llm/bridge_all.py

@@ -12,11 +12,11 @@ import tiktoken
 from functools import wraps, lru_cache
 from concurrent.futures import ThreadPoolExecutor
 
-from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
-from .bridge_chatgpt import predict as chatgpt_ui
+from request_llm.bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
+from request_llm.bridge_chatgpt import predict as chatgpt_ui
 
-from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
-from .bridge_chatglm import predict as chatglm_ui
+from request_llm.bridge_chatgpt import predict_no_ui_long_connection as chatglm_noui
+from request_llm.bridge_chatgpt import predict as chatglm_ui
 
 # from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
 # from .bridge_tgui import predict as tgui_ui
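A hedged observation: the second pair of replacement lines imports the chatglm symbols from request_llm.bridge_chatgpt rather than request_llm.bridge_chatglm, which silently routes chatglm traffic through the chatgpt bridge. This looks like a copy-paste slip; the absolute-import form that preserves the old behavior would presumably be:

    from request_llm.bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
    from request_llm.bridge_chatglm import predict as chatglm_ui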
request_llm/bridge_chatgpt.py

@@ -60,7 +60,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     while True:
         try:
             # make a POST request to the API endpoint, stream=False
-            from .bridge_all import model_info
+            from bridge_all import model_info
             endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
             response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
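A hedged reading of this change: the bare from bridge_all import model_info resolves through sys.path instead of through the package, so it works when bridge_chatgpt.py is run directly as a script (Python puts the script's own directory first on sys.path), matching the if __name__ == '__main__' block added below, but it raises ModuleNotFoundError when the module is imported normally as request_llm.bridge_chatgpt from the repository root. The predict hunk below keeps the relative form, so the file now mixes both styles. A common dual-mode idiom, shown here only as a sketch:

    try:
        from .bridge_all import model_info   # normal case: imported as part of the request_llm package
    except ImportError:
        from bridge_all import model_info    # fallback: file executed directly from inside request_llm/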
@@ -154,7 +154,9 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             from .bridge_all import model_info
             endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
             response = requests.post(endpoint, headers=headers, proxies=proxies,
-                                    json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
+                                    json=payload, stream=True, timeout=TIMEOUT_SECONDS);
+            print(response)
+            break
         except:
             retry += 1
             chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
@@ -269,3 +271,15 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
         print('输入中可能存在乱码。')
     return headers, payload
 
+if __name__ == '__main__':
+    llm_kwargs = {
+        'api_key': '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',
+        'llm_model': 'gpt-3.5-turbo',
+        'top_p': 1,
+        'max_length': 512,
+        'temperature': 1,
+        # 'ipaddr': ipaddr.client.host
+    }
+    chat = []
+    predict('你好', llm_kwargs=llm_kwargs, chatbot=chat, plugin_kwargs={})
+    print(chat)
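An aside on the new self-test block: it hard-codes what looks like a live api_key, and anything committed in plain text should be treated as leaked and revoked. A minimal sketch of the usual alternative (OPENAI_API_KEY is an assumed variable name, not something this repo defines):

    import os

    llm_kwargs = {
        'api_key': os.environ.get('OPENAI_API_KEY', ''),  # keep secrets out of version control
        'llm_model': 'gpt-3.5-turbo',
        'top_p': 1,
        'max_length': 512,
        'temperature': 1,
    }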
toolbox.py

@@ -376,7 +376,7 @@ def find_recent_files(directory):
     return recent_files
 
 
-def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
+def on_file_uploaded(files, chatbot, txt, checkboxes):
     if len(files) == 0:
         return chatbot, txt
     import shutil