diff --git a/func_box.py b/func_box.py
new file mode 100644
index 0000000..2dcd395
--- /dev/null
+++ b/func_box.py
@@ -0,0 +1,15 @@
+#! .\venv\
+# encoding: utf-8
+# @Time : 2023/4/18
+# @Author : Spike
+# @Descr :
+import hashlib
+
+def md5_str(st):
+    # create an MD5 hash object
+    md5 = hashlib.md5()
+    # update the hash with the stringified input
+    md5.update(str(st).encode())
+    # get the hexadecimal digest
+    result = md5.hexdigest()
+    return result
\ No newline at end of file
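
A quick usage note on the helper above (illustrative sketch, not part of the diff): md5_str is deterministic, so equal inputs always produce the same 32-character hex digest, which makes it usable as a stable key, e.g. for tagging per-client artifacts; MD5 is not collision-resistant, so it should not be used for anything security-sensitive.

    from func_box import md5_str

    # same input, same digest on every run
    assert md5_str("127.0.0.1") == md5_str("127.0.0.1")
    print(md5_str("127.0.0.1"))  # 32-char hex string, e.g. 'f528764d...'
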
diff --git a/main_private.py b/main_private.py
new file mode 100644
index 0000000..59deb92
--- /dev/null
+++ b/main_private.py
@@ -0,0 +1,227 @@
+import os
+
+os.environ['no_proxy'] = '*'  # avoid unexpected interference from proxy settings
+import gradio as gr
+from request_llm.bridge_chatgpt import predict
+from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, \
+ DummyWith
+
+# It is recommended to copy config_private.py and keep secrets such as API keys and proxy URLs there, to avoid accidentally pushing them to GitHub
+proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
+ get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT',
+ 'API_KEY')
+
+# If WEB_PORT is -1, pick a random free port for the web UI
+PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
+if not AUTHENTICATION: AUTHENTICATION = None
+
+from check_proxy import get_current_version
+
+initial_prompt = "Serve me as a writing and programming assistant."
+title_html = f"
ChatGPT 学术优化 {get_current_version()}
"
+description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
+
+# Query logging; Python 3.9+ recommended (the newer the better)
+import logging
+
+os.makedirs("gpt_log", exist_ok=True)
+try:
+    # the encoding argument requires Python 3.9+
+    logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
+except ValueError:
+    # older Python raises ValueError for the unrecognized encoding argument
+    logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
+print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
+
+# Basic function modules
+from core_functional import get_core_functions
+
+functional = get_core_functions()
+
+# Advanced function plugins
+from crazy_functional import get_crazy_functions
+
+crazy_fns = get_crazy_functions()
+
+# Handle conversion of markdown-formatted text
+gr.Chatbot.postprocess = format_io
+
+# Apply theme and color adjustments
+from theme import adjust_theme, advanced_css
+
+set_theme = adjust_theme()
+
+# Proxy check and auto-update
+from check_proxy import check_proxy, auto_update
+
+proxy_info = check_proxy(proxies)
+
+gr_L1 = lambda: gr.Row().style()
+gr_L2 = lambda scale: gr.Column(scale=scale)
+if LAYOUT == "TOP-DOWN":
+ gr_L1 = lambda: DummyWith()
+ gr_L2 = lambda scale: gr.Row()
+ CHATBOT_HEIGHT /= 2
+
+cancel_handles = []
+with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
+ gr.HTML(title_html)
+ cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
+ with gr_L1():
+ with gr_L2(scale=2):
+ with gr.Box():
+ chatbot = gr.Chatbot()
+ chatbot.style(height=CHATBOT_HEIGHT)
+ history = gr.State([])
+ with gr.Row():
+ status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
+
+ with gr_L2(scale=1):
+ with gr.Accordion("输入区", open=True) as area_input_primary:
+ with gr.Row():
+ txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
+ with gr.Row():
+ submitBtn = gr.Button("提交", variant="primary")
+ with gr.Row():
+                    resetBtn = gr.Button("重置", variant="secondary")
+                    resetBtn.style(size="sm")
+                    stopBtn = gr.Button("停止", variant="secondary")
+                    stopBtn.style(size="sm")
+
+ with gr.Tab('Function'):
+ with gr.Accordion("基础功能区", open=True) as area_basic_fn:
+ with gr.Row():
+ for k in functional:
+ variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
+ functional[k]["Button"] = gr.Button(k, variant=variant)
+ with gr.Tab('Public'):
+ with gr.Row():
+ with gr.Accordion("点击展开“文件上传区”。上传本地文件可供高亮函数插件调用。",
+ open=False) as area_file_up:
+ file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
+ with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
+ with gr.Row():
+ gr.Markdown("注意:以下“高亮”标识的函数插件需从输入区读取路径作为参数.")
+ with gr.Row():
+ for k in crazy_fns:
+ if not crazy_fns[k].get("AsButton", True): continue
+ variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
+ crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
+ crazy_fns[k]["Button"].style(size="sm")
+ with gr.Row():
+ with gr.Accordion("更多函数插件", open=True):
+ dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
+ with gr.Column(scale=1):
+ dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(
+ container=False)
+ with gr.Column(scale=1):
+ switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
+
+ with gr.Tab('Setting'):
+ with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=(LAYOUT == "TOP-DOWN")):
+                    system_prompt = gr.Textbox(show_label=True, placeholder="System Prompt", label="System prompt",
+                                               value=initial_prompt)
+                    top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.01, interactive=True,
+                                      label="Top-p (nucleus sampling)")
+                    temperature = gr.Slider(minimum=0, maximum=2.0, value=1.0, step=0.01, interactive=True,
+                                            label="Temperature")
+ checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"],
+ value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
+ gr.Markdown(description)
+
+ with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
+ with gr.Row():
+ txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(
+ container=False)
+ with gr.Row():
+ submitBtn2 = gr.Button("提交", variant="primary")
+ with gr.Row():
+            resetBtn2 = gr.Button("重置", variant="secondary")
+            resetBtn2.style(size="sm")
+            stopBtn2 = gr.Button("停止", variant="secondary")
+            stopBtn2.style(size="sm")
+
+
+    # Interaction between the visibility checkboxes and the function areas
+ def fn_area_visibility(a):
+ ret = {}
+ ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
+ ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
+ ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
+ # ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
+ if "底部输入区" in a: ret.update({txt: gr.update(value="")})
+ return ret
+
+
+ checkboxes.select(fn_area_visibility, [checkboxes],
+ [area_basic_fn, area_crazy_fn, area_input_primary, txt, txt2])
+    # Group the frequently reused widget handles
+ input_combo = [cookies, txt, txt2, top_p, temperature, chatbot, history, system_prompt]
+ output_combo = [cookies, chatbot, history, status]
+ predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
+    # Submit and reset buttons
+ cancel_handles.append(txt.submit(**predict_args))
+ cancel_handles.append(txt2.submit(**predict_args))
+ cancel_handles.append(submitBtn.click(**predict_args))
+ cancel_handles.append(submitBtn2.click(**predict_args))
+ resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
+ resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
+    # Register callbacks for the basic function area
+ for k in functional:
+ click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict),
+ inputs=[*input_combo, gr.State(True), gr.State(k)],
+ outputs=output_combo)
+ cancel_handles.append(click_handle)
+    # File-upload area: interaction with the chatbot after receiving files
+ file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
+    # Function plugins: fixed button area
+ for k in crazy_fns:
+ if not crazy_fns[k].get("AsButton", True): continue
+ click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]),
+ [*input_combo, gr.State(PORT)], output_combo)
+ click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
+ cancel_handles.append(click_handle)
+
+
+    # Function plugins: interaction between the dropdown menu and the switchy button
+ def on_dropdown_changed(k):
+ variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
+ return {switchy_bt: gr.update(value=k, variant=variant)}
+
+
+ dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt])
+
+
+    # Register the callback of the switchy button (switchy_bt)
+ def route(k, *args, **kwargs):
+ if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
+ yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
+
+
+ click_handle = switchy_bt.click(route, [switchy_bt, *input_combo, gr.State(PORT)], output_combo)
+ click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
+ # def expand_file_area(file_upload, area_file_up):
+ # if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
+ # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
+ cancel_handles.append(click_handle)
+    # Register the stop buttons' callbacks
+ stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
+ stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
+
+
+# Gradio's inbrowser trigger is unreliable, so fall back to the original browser-opening helper
+def auto_opentab_delay():
+ import threading, webbrowser, time
+    print("如果浏览器没有自动打开,请复制并转到以下URL:")
+ print(f"\t(亮色主题): http://localhost:{PORT}")
+ print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
+
+    def open_browser():  # renamed to avoid shadowing the builtin open()
+        time.sleep(2)  # give the server a moment to start before opening the browser
+        webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
+
+    threading.Thread(target=open_browser, name="open-browser", daemon=True).start()
+ threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
+
+
+auto_opentab_delay()
+demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
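
A note on the launch call above: Gradio's auth parameter accepts a list of (username, password) tuples or a checker callable, and None disables the login screen entirely, which is why an empty AUTHENTICATION is coerced to None. A minimal sketch with hypothetical credentials:

    # in config_private.py (hypothetical values)
    AUTHENTICATION = [("admin", "change-me")]   # one (username, password) pair
    # AUTHENTICATION = []                       # empty -> coerced to None, no login screen

    # equivalent effect at launch time:
    # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=[("admin", "change-me")])
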
diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py
index e9dfc6b..49fe001 100644
--- a/request_llm/bridge_chatgpt.py
+++ b/request_llm/bridge_chatgpt.py
@@ -59,7 +59,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
retry = 0
while True:
try:
- # make a POST request to the API endpoint, stream=False
+            # make a POST request to the API endpoint, stream=False
response = requests.post(API_URL, headers=headers, proxies=proxies,
json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
except requests.exceptions.ReadTimeout as e:
@@ -103,7 +103,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
return result
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', ipaddr='', stream = True, additional_fn=None):
"""
     Send the query to chatGPT and fetch the output as a stream.
     Used for the basic chat functionality.
@@ -132,17 +132,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
if stream:
raw_input = inputs
- logging.info(f'[raw_input] {raw_input}')
+ logging.info(f'[raw_input]_{ipaddr} {raw_input}')
chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
- headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
+ headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream, ipaddr)
history.append(inputs); history.append(" ")
retry = 0
while True:
try:
- # make a POST request to the API endpoint, stream=True
+            # make a POST request to the API endpoint, stream=True
response = requests.post(API_URL, headers=headers, proxies=proxies,
json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
except:
@@ -168,7 +168,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
try:
if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
                    # end of the data stream; gpt_replying_buffer is complete as well
- logging.info(f'[response] {gpt_replying_buffer}')
+ logging.info(f'[response]_{ipaddr} {gpt_replying_buffer}')
break
                # process the body of the data stream
chunkjson = json.loads(chunk.decode()[6:])
@@ -198,7 +198,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
return
-def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
+def generate_payload(inputs, llm_kwargs, history, system_prompt, stream, ipaddr):
"""
     Assemble all the information, pick the LLM model, and build the HTTP request, ready to be sent.
"""
@@ -245,9 +245,8 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"frequency_penalty": 0,
}
try:
- print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
+ print("\033[1;35m", f"{llm_kwargs['llm_model']}_{ipaddr} :", "\033[0m", f"{conversation_cnt} : {inputs[:100]} ..........")
except:
print('输入中可能存在乱码。')
- return headers,payload
-
+ return headers, payload
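
The _{ipaddr} suffix added to the log tags above makes gpt_log/chat_secrets.log filterable per client. A minimal sketch of recovering one client's traffic (the path comes from main_private.py; the regex assumes the default logging format and the tag layout written above):

    import re

    # matches lines such as: INFO:root:[raw_input]_127.0.0.1 hello
    pattern = re.compile(r"\[(raw_input|response)\]_(?P<ip>[\d.]+) (?P<text>.*)")

    with open("gpt_log/chat_secrets.log", encoding="utf-8") as f:
        for line in f:
            m = pattern.search(line)
            if m and m.group("ip") == "127.0.0.1":
                print(m.group("text"))
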
diff --git a/request_llm/bridge_tgui.py b/request_llm/bridge_tgui.py
index 22a4075..5964008 100644
--- a/request_llm/bridge_tgui.py
+++ b/request_llm/bridge_tgui.py
@@ -90,7 +90,7 @@ async def run(context, max_token=512):
-def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
+def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt='', ipaddr='', stream = True, additional_fn=None):
"""
     Send the query to chatGPT and fetch the output as a stream.
     Used for the basic chat functionality.
@@ -108,7 +108,7 @@ def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt=
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
raw_input = "What I would like to say is the following: " + inputs
- logging.info(f'[raw_input] {raw_input}')
+ logging.info(f'[raw_input]_{ipaddr} {raw_input}')
history.extend([inputs, ""])
chatbot.append([inputs, ""])
     yield from update_ui(chatbot=chatbot, history=history, msg="等待响应")  # refresh the UI
@@ -140,7 +140,7 @@ def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt=
chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
- logging.info(f'[response] {tgui_say}')
+ logging.info(f'[response]_{ipaddr} {tgui_say}')
diff --git a/toolbox.py b/toolbox.py
index 3ced653..7f5a766 100644
--- a/toolbox.py
+++ b/toolbox.py
@@ -5,6 +5,8 @@ import importlib
import traceback
import inspect
import re
+import gradio as gr
+import hashlib
from latex2mathml.converter import convert as tex2mathml
from functools import wraps, lru_cache
@@ -27,7 +29,7 @@ def ArgsGeneralWrapper(f):
"""
     Decorator that reorganizes the input arguments, changing their order and structure.
"""
- def decorated(cookies, txt, txt2, top_p, temperature, chatbot, history, system_prompt, *args):
+ def decorated(cookies, txt, txt2, top_p, temperature, chatbot, history, system_prompt, request: gr.Request, *args):
txt_passon = txt
if txt == "" and txt2 != "": txt_passon = txt2
         # introduce a chatbot that carries cookies
@@ -46,7 +48,8 @@ def ArgsGeneralWrapper(f):
}
chatbot_with_cookie = ChatBotWithCookies(cookies)
chatbot_with_cookie.write_list(chatbot)
- yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
+ ipaddr = request.client.host
+ yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, ipaddr, *args)
return decorated
 def update_ui(chatbot, history, msg='正常', **kwargs):  # refresh the UI
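
The new request: gr.Request parameter in decorated() works because Gradio inspects event-handler signatures and injects the current request into any argument annotated with gr.Request; the wrapped plugin functions never see the raw request, only the derived ipaddr string. A standalone sketch of the same mechanism (hypothetical demo, not code from this repo):

    import gradio as gr

    def greet(name, request: gr.Request):
        # gradio fills `request` automatically; the UI only supplies `name`
        return f"Hello {name}, you connected from {request.client.host}"

    with gr.Blocks() as demo:
        txt = gr.Textbox(label="name")
        out = gr.Textbox(label="greeting")
        txt.submit(greet, [txt], [out])

    # demo.launch()
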