From 9f3194619b12545f14f17df79c43a4621815c869 Mon Sep 17 00:00:00 2001 From: w_xiaolizu Date: Wed, 19 Apr 2023 21:38:47 +0800 Subject: [PATCH] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=89=93=E5=8D=B0=E7=94=A8?= =?UTF-8?q?=E6=88=B7=E4=BF=A1=E6=81=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- __main__.py | 4 ++-- request_llm/bridge_chatgpt.py | 22 ++++++++++------------ toolbox.py | 4 +++- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/__main__.py b/__main__.py index 631ee3c..09d0b8d 100644 --- a/__main__.py +++ b/__main__.py @@ -126,8 +126,8 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled= label="Temperature", ) max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",) - models_box = gr.CheckboxGroup(["input加密", "prompt提示"], - value=["input加密", "prompt提示"], label="对话模式") + models_box = gr.CheckboxGroup(["input加密"], + value=["input加密"], label="对话模式") checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区") md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style( diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py index 152c5c1..aaa0a2c 100644 --- a/request_llm/bridge_chatgpt.py +++ b/request_llm/bridge_chatgpt.py @@ -60,7 +60,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", while True: try: # make a POST request to the API endpoint, stream=False - from bridge_all import model_info + from request_llm.bridge_all import model_info endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] response = requests.post(endpoint, headers=headers, proxies=proxies, json=payload, stream=True, timeout=TIMEOUT_SECONDS); break @@ -134,7 +134,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp inputs = core_functional[additional_fn]["Prefix"] + inputs + 
core_functional[additional_fn]["Suffix"] raw_input = inputs - logging.info(f'[raw_input] {raw_input}') + logging.info(f'[raw_input]_{llm_kwargs["ipaddr"]} {raw_input}') chatbot.append((inputs, "")) yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 @@ -144,19 +144,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。") yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面 return - + history.append(inputs); history.append(" ") retry = 0 while True: try: # make a POST request to the API endpoint, stream=True - from .bridge_all import model_info + from request_llm.bridge_all import model_info endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] response = requests.post(endpoint, headers=headers, proxies=proxies, - json=payload, stream=True, timeout=TIMEOUT_SECONDS); - print(response) - break + json=payload, stream=True, timeout=TIMEOUT_SECONDS);break except: retry += 1 chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg)) @@ -165,7 +163,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp if retry > MAX_RETRY: raise TimeoutError gpt_replying_buffer = "" - + is_head_of_the_stream = True if stream: stream_response = response.iter_lines() @@ -175,14 +173,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()): # 数据流的第一帧不携带content is_head_of_the_stream = False; continue - + if chunk: try: chunk_decoded = chunk.decode() # 前者API2D的 if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0): # 判定为数据流的结束,gpt_replying_buffer也写完了 - logging.info(f'[response] {gpt_replying_buffer}') + logging.info(f'[response]_{llm_kwargs["ipaddr"]} {gpt_replying_buffer}') break # 处理数据流的主体 chunkjson = json.loads(chunk_decoded[6:]) @@ -266,14 +264,14 
@@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): "frequency_penalty": 0, } try: - print("\033[1;35m", f"{llm_kwargs['llm_model']} :", "\033[0m", f"{conversation_cnt} : {inputs[:100]} ..........") + print("\033[1;35m", f"{llm_kwargs['llm_model']}_{llm_kwargs['ipaddr']} :", "\033[0m", f"{conversation_cnt} : {inputs[:100]} ..........") except: print('输入中可能存在乱码。') return headers, payload if __name__ == '__main__': llm_kwargs = { - 'api_key': 'sk-REDACTED-LEAKED-KEY-REVOKE-IMMEDIATELY-00000000', + 'api_key': 'sk-REDACTED-LEAKED-KEY-REVOKE-IMMEDIATELY-00000000', 'llm_model': 'gpt-3.5-turbo', 'top_p': 1, 'max_length': 512, diff --git a/toolbox.py b/toolbox.py index fdd2e79..08dec2d 100644 --- a/toolbox.py +++ b/toolbox.py @@ -27,7 +27,9 @@ def ArgsGeneralWrapper(f): """ 装饰器函数,用于重组输入参数,改变输入参数的顺序与结构。 """ - def decorated(cookies, max_length, llm_model, txt, top_p, temperature, chatbot, history, system_prompt, models, ipaddr:gr.Request, *args): + def decorated(cookies, max_length, llm_model, txt, top_p, temperature, + chatbot, history, system_prompt, models, ipaddr:gr.Request, *args): + """""" txt_passon = txt if 'input加密' in models: txt_passon = func_box.encryption_str(txt) # 引入一个有cookie的chatbot