增加打印用户信息 (Add printing of user information)
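Summary (reconstructed from the diff below): this commit threads the requesting client's address (ipaddr, taken from the gr.Request object that Gradio injects into the decorated handler in ArgsGeneralWrapper) into llm_kwargs, then tags the [raw_input] / [response] log lines and the console print in generate_payload with that address, so each request can be traced back to a user. Along the way it drops the "prompt提示" option from the 对话模式 checkbox group, swaps two relative bridge_all imports for absolute request_llm.bridge_all imports, removes a leftover debug print(response), and rotates the hard-coded test api_key.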
@@ -126,8 +126,8 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
                     label="Temperature", )
                 max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)

-                models_box = gr.CheckboxGroup(["input加密", "prompt提示"],
-                                              value=["input加密", "prompt提示"], label="对话模式")
+                models_box = gr.CheckboxGroup(["input加密"],
+                                              value=["input加密"], label="对话模式")
                 checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"],
                                               value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
                 md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(
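Note: the hunk above only narrows the UI defaults; "prompt提示" is removed both from the selectable options and from the default value of the 对话模式 CheckboxGroup, leaving "input加密" as the sole conversation-mode toggle.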
@@ -60,7 +60,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     while True:
         try:
             # make a POST request to the API endpoint, stream=False
-            from .bridge_all import model_info
+            from request_llm.bridge_all import model_info
             endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
             response = requests.post(endpoint, headers=headers, proxies=proxies,
                                      json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
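The switch from the relative "from .bridge_all import ..." to the absolute "from request_llm.bridge_all import ..." is presumably what keeps this module usable when bridge_chatgpt.py is run directly as a script (see the if __name__ == '__main__': block further down): relative imports only resolve when the file is loaded as part of the request_llm package, whereas the absolute form works from the repository root.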
@@ -134,7 +134,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

     raw_input = inputs
-    logging.info(f'[raw_input] {raw_input}')
+    logging.info(f'[raw_input]_{llm_kwargs["ipaddr"]} {raw_input}')
     chatbot.append((inputs, ""))
     yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面

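For illustration, a minimal sketch of what the new tagged log line evaluates to, assuming llm_kwargs['ipaddr'] has already been filled with a plain IP string (the address below is hypothetical):

    import logging
    logging.basicConfig(level=logging.INFO)

    llm_kwargs = {'ipaddr': '203.0.113.7'}   # hypothetical client address
    raw_input = '你好'
    # Same f-string as the + line above; emits e.g.:
    #   INFO:root:[raw_input]_203.0.113.7 你好
    logging.info(f'[raw_input]_{llm_kwargs["ipaddr"]} {raw_input}')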
@@ -144,19 +144,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。")
         yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
         return

     history.append(inputs); history.append(" ")

     retry = 0
     while True:
         try:
             # make a POST request to the API endpoint, stream=True
-            from .bridge_all import model_info
+            from request_llm.bridge_all import model_info
             endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
             response = requests.post(endpoint, headers=headers, proxies=proxies,
-                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS);
-            print(response)
-            break
+                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
         except:
             retry += 1
             chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
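Besides the import fix, this hunk reverts an earlier debugging change: the stray print(response) is deleted and the break is folded back onto the requests.post(...) statement, restoring the original one-line exit from the retry loop.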
@@ -165,7 +163,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         if retry > MAX_RETRY: raise TimeoutError

     gpt_replying_buffer = ""

     is_head_of_the_stream = True
     if stream:
         stream_response = response.iter_lines()
@@ -175,14 +173,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()):
                 # 数据流的第一帧不携带content
                 is_head_of_the_stream = False; continue

             if chunk:
                 try:
                     chunk_decoded = chunk.decode()
                     # 前者API2D的
                     if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
                         # 判定为数据流的结束,gpt_replying_buffer也写完了
-                        logging.info(f'[response] {gpt_replying_buffer}')
+                        logging.info(f'[response]_{llm_kwargs["ipaddr"]} {gpt_replying_buffer}')
                         break
                     # 处理数据流的主体
                     chunkjson = json.loads(chunk_decoded[6:])
@@ -266,14 +264,14 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
         "frequency_penalty": 0,
     }
     try:
-        print("\033[1;35m", f"{llm_kwargs['llm_model']} :", "\033[0m", f"{conversation_cnt} : {inputs[:100]} ..........")
+        print("\033[1;35m", f"{llm_kwargs['llm_model']}_{llm_kwargs['ipaddr']} :", "\033[0m", f"{conversation_cnt} : {inputs[:100]} ..........")
     except:
         print('输入中可能存在乱码。')
     return headers, payload

 if __name__ == '__main__':
     llm_kwargs = {
-        'api_key': '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',
+        'api_key': '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',
         'llm_model': 'gpt-3.5-turbo',
         'top_p': 1,
         'max_length': 512,
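The \033[1;35m ... \033[0m pair in the print above are ANSI escape codes that bracket the model tag in bold magenta on the console; the change simply appends _{llm_kwargs['ipaddr']} to that tag, so the colored console line now reads, e.g., gpt-3.5-turbo_203.0.113.7 : (the address is illustrative).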
@@ -27,7 +27,9 @@ def ArgsGeneralWrapper(f):
     """
     装饰器函数,用于重组输入参数,改变输入参数的顺序与结构。
     """
-    def decorated(cookies, max_length, llm_model, txt, top_p, temperature, chatbot, history, system_prompt, models, ipaddr:gr.Request, *args):
+    def decorated(cookies, max_length, llm_model, txt, top_p, temperature,
+                  chatbot, history, system_prompt, models, ipaddr:gr.Request, *args):
+        """"""
         txt_passon = txt
         if 'input加密' in models: txt_passon = func_box.encryption_str(txt)
         # 引入一个有cookie的chatbot
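This toolbox.py hunk is where ipaddr enters the call chain: Gradio injects a gr.Request object into any handler parameter annotated with that type. A hypothetical sketch of how the wrapper might then fold the client address into llm_kwargs (this continuation is not shown in the diff; the attribute access assumes Gradio's request object exposes the underlying client, as recent releases do, and the cookie layout is an assumption):

    import gradio as gr

    def decorated(cookies, max_length, llm_model, txt, top_p, temperature,
                  chatbot, history, system_prompt, models, ipaddr: gr.Request, *args):
        # Hypothetical continuation: pack the caller's address next to the
        # sampling parameters so downstream code can read llm_kwargs['ipaddr'].
        llm_kwargs = {
            'api_key': cookies['api_key'],   # assumption: the key lives in the cookie dict
            'llm_model': llm_model,
            'top_p': top_p,
            'temperature': temperature,
            'max_length': max_length,
            'ipaddr': ipaddr.client.host,    # client IP from the injected gr.Request
        }
        ...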