Tinker with a few things

w_xiaolizu
2023-04-19 18:37:43 +08:00
parent 7bb005c13b
commit b8e20f5aae
8 changed files with 65 additions and 21 deletions

View File

@@ -71,6 +71,8 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
         chatbot = gr.Chatbot()
         chatbot.style(height=CHATBOT_HEIGHT)
         history = gr.State([])
+        with gr.Row(visible=False):
+            assist = None
         with gr.Row():
             status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
@@ -123,8 +125,8 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
                 label="Top-p (nucleus sampling)", )
             temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True,
                 label="Temperature", )
-            models_box = gr.CheckboxGroup(["input加密"],
-                value=["input加密"], label="输入模式")
+            models_box = gr.CheckboxGroup(["input加密", "prompt提示"],
+                value=["input加密", "prompt提示"], label="对话模式")
             checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"],
                 value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")

View File

@@ -16,10 +16,9 @@ def get_core_functions():
             "Suffix": r"",
             "Color": r"secondary",  # button color
         },
-        "中文学术润色": {
-            "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
-                r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
-            "Suffix": r"",
+        "预测输入": {
+            "Prefix": r"",
+            "Suffix": "\nAfter answering the questions, list three more questions that users may ask",
         },
         "查找语法错误": {
             "Prefix": r"Can you help me ensure that the grammar and the spelling is correct? " +

View File

@@ -67,7 +67,8 @@ def get_crazy_functions():
         "Function": HotReload(批量生成函数注释)
     },
     "[多线程Demo] 解析此项目本身(源码自译解)": {
-        "Function": HotReload(解析项目本身)
+        "Function": HotReload(解析项目本身),
+        "AsButton": False,  # move it into the dropdown menu instead of showing a button
     },
     "[多线程demo] 把本项目源代码切换成全英文": {
         # HotReload means hot reload: edits to a plugin's code take effect without restarting the program
@@ -123,7 +124,7 @@ def get_crazy_functions():
     "理解PDF文档内容 模仿ChatPDF": {
         # HotReload means hot reload: edits to a plugin's code take effect without restarting the program
         "Color": "stop",
-        "AsButton": False,  # shown in the dropdown menu
+        "AsButton": True,   # shown as a button instead of in the dropdown menu
         "Function": HotReload(理解PDF文档内容标准文件输入)
     },
     "[测试功能] 英文Latex项目全文润色输入路径或上传压缩包": {

promptgenerator.py Normal file
View File

@@ -0,0 +1,12 @@
+#! .\venv\
+# encoding: utf-8
+# @Time   : 2023/4/19
+# @Author : Spike
+# @Descr  :
+# the default prompts

View File

@@ -103,7 +103,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     return result
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', ipaddr='', stream = True, additional_fn=None):
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
     """
     Send the request to chatGPT and fetch the reply as a stream.
     Used for the basic chat functionality.
@@ -132,11 +132,11 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     if stream:
         raw_input = inputs
-        logging.info(f'[raw_input]_{ipaddr} {raw_input}')
+        logging.info(f'[raw_input] {raw_input}')
         chatbot.append((inputs, ""))
         yield from update_ui(chatbot=chatbot, history=history, msg="等待响应")  # refresh the UI
-    headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream, ipaddr)
+    headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
     history.append(inputs); history.append(" ")
     retry = 0
@@ -168,7 +168,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                 try:
                     if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
                         # end of the data stream: gpt_replying_buffer is fully written
-                        logging.info(f'[response]_{ipaddr} {gpt_replying_buffer}')
+                        logging.info(f'[response] {gpt_replying_buffer}')
                         break
                     # handle the main body of the data stream
                     chunkjson = json.loads(chunk.decode()[6:])
@@ -198,7 +198,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg)  # refresh the UI
             return
-def generate_payload(inputs, llm_kwargs, history, system_prompt, stream, ipaddr):
+def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     """
     Assemble all the information, pick the LLM model, and build the HTTP request, ready to be sent.
     """
@@ -245,7 +245,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream, ipaddr)
         "frequency_penalty": 0,
     }
     try:
-        print("\033[1;35m", f"{llm_kwargs['llm_model']}_{ipaddr} :", "\033[0m", f"{conversation_cnt} : {inputs[:100]} ..........")
+        print("\033[1;35m", f"{llm_kwargs['llm_model']} :", "\033[0m", f"{conversation_cnt} : {inputs[:100]} ..........")
     except:
         print('输入中可能存在乱码。')
     return headers, payload
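Aside: the common thread in this file's hunks is removing the threaded-through `ipaddr` parameter; per the toolbox.py hunk below, the client host is now logged once in the decorator instead. A minimal sketch of that pattern, with hypothetical names:

```python
import logging

def log_client_wrapper(f):
    # Log request metadata in the wrapper so the wrapped function
    # no longer needs its own ipaddr parameter.
    def decorated(client_host, txt, *args, **kwargs):
        logging.info(f'[user_click]_{client_host} {txt} ----')
        return f(txt, *args, **kwargs)
    return decorated

@log_client_wrapper
def predict(txt):
    return f"reply to: {txt}"

print(predict("127.0.0.1", "hello"))
```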

View File

@@ -90,7 +90,7 @@ async def run(context, max_token=512):
-def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt='', ipaddr='', stream = True, additional_fn=None):
+def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt='' , stream = True, additional_fn=None):
     """
     Send the request to chatGPT and fetch the reply as a stream.
     Used for the basic chat functionality.
@@ -108,7 +108,7 @@ def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt=
         inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
     raw_input = "What I would like to say is the following: " + inputs
-    logging.info(f'[raw_input]_{ipaddr} {raw_input}')
+    logging.info(f'[raw_input] {raw_input}')
     history.extend([inputs, ""])
     chatbot.append([inputs, ""])
     yield from update_ui(chatbot=chatbot, history=history, msg="等待响应")  # refresh the UI
@@ -140,7 +140,7 @@ def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt=
         chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-    logging.info(f'[response]_{ipaddr} {tgui_say}')
+    logging.info(f'[response] {tgui_say}')

test.py Normal file
View File

@@ -0,0 +1,25 @@
+#! .\venv\
+# encoding: utf-8
+# @Time   : 2023/4/19
+# @Author : Spike
+# @Descr  :
+import gradio as gr
+
+def sentence_builder(quantity, xixi):
+    return f"{quantity}_{xixi}"
+
+with gr.Blocks() as demo:
+    txt = gr.Textbox(label="Input", lines=2)
+    txt_2 = gr.CheckboxGroup(['USA', "Japan"], value=['USA'], label='你好呀')
+    txt_3 = gr.Textbox(value="", label="Output")
+    btn = gr.Button(value="Submit")
+    btn.click(sentence_builder, inputs=[txt, txt_2], outputs=[txt_3])
+
+if __name__ == "__main__":
+    demo.launch()

View File

@@ -9,6 +9,7 @@ import gradio as gr
 import func_box
 from latex2mathml.converter import convert as tex2mathml
 from functools import wraps, lru_cache
+import logging
 ###############################  Plugin input/output hookup area  #######################################
 class ChatBotWithCookies(list):
@@ -29,7 +30,7 @@ def ArgsGeneralWrapper(f):
     """
     Decorator that regroups the input parameters, changing their order and structure.
     """
-    def decorated(cookies, txt, top_p, temperature, chatbot, history, system_prompt, models, request: gr.Request, *args):
+    def decorated(cookies, txt, top_p, temperature, chatbot, history, system_prompt, models , adder: gr.Request, *args):
         txt_passon = txt
         if 'input加密' in models: txt_passon = func_box.encryption_str(txt)
         # bring in a chatbot that carries cookies
@@ -48,8 +49,8 @@ def ArgsGeneralWrapper(f):
         }
         chatbot_with_cookie = ChatBotWithCookies(cookies)
         chatbot_with_cookie.write_list(chatbot)
-        ipaddr = request.client.host
-        yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, ipaddr, *args)
+        logging.info(f'[user_click]_{adder.client.host} {txt_passon} ----')
+        yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
     return decorated
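Aside: `adder: gr.Request` relies on Gradio's request injection: annotating a handler parameter with `gr.Request` makes Gradio pass in the request object, whose `client.host` is the caller's address. A minimal standalone illustration:

```python
import gradio as gr

def echo(text, request: gr.Request):
    # Gradio fills `request` automatically because of the annotation.
    return f"{request.client.host}: {text}"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    inp.submit(echo, inputs=[inp], outputs=[out])

if __name__ == "__main__":
    demo.launch()
```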
def update_ui(chatbot, history, msg='正常', **kwargs):  # refresh the UI
@@ -213,7 +214,11 @@ def HotReload(f):
     def decorated(*args, **kwargs):
         fn_name = f.__name__
         f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
-        yield from f_hot_reload(*args, **kwargs)
+        try:
+            yield from f_hot_reload(*args, **kwargs)
+        except TypeError:
+            # retry without the argument at index 6 (likely the legacy ipaddr slot)
+            args = tuple(args[element] for element in range(len(args)) if element != 6)
+            yield from f_hot_reload(*args, **kwargs)
     return decorated
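Aside: a self-contained demonstration of the hot-reload pattern HotReload implements: re-import the function's module on every call so code edits take effect without restarting the process. `demo_plugin.py` is a throwaway module created only for this example:

```python
import importlib
import inspect
import pathlib

# Create a tiny plugin module so the demo runs standalone.
pathlib.Path("demo_plugin.py").write_text("def greet():\n    yield 'hello v1'\n")
import demo_plugin

def HotReload(f):
    def decorated(*args, **kwargs):
        # Reload f's module, fetch the fresh function object, delegate to it.
        fresh = getattr(importlib.reload(inspect.getmodule(f)), f.__name__)
        yield from fresh(*args, **kwargs)
    return decorated

greet = HotReload(demo_plugin.greet)
print(next(greet()))  # -> hello v1
pathlib.Path("demo_plugin.py").write_text("def greet():\n    yield 'hello v2'\n")
print(next(greet()))  # -> hello v2
```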