diff --git a/__main__.py b/__main__.py index 3c6d0b8..cecd9f6 100644 --- a/__main__.py +++ b/__main__.py @@ -83,6 +83,41 @@ class ChatBot(ChatBotFrame): with gr.Row(elem_id='debug_mes'): self.status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}") + def draw_examples(self): + with gr.Column(elem_id='examples_col') as self.examples_column: + gr.Markdown('# Get Started Quickly') + with gr.Row(): + hide_components = gr.Textbox(visible=False) + gr.Button.update = func_box.update_btn + self.example = [['今天伦敦天气怎么样?', '对2021年以后的世界和事件了解有限', self.submitBtn.update(elem_id='highlight_update')], + ['今夕何夕,明月何月?', '偶尔会产生不正确的信息', self.submitBtn.update(elem_id='highlight_update')], + ['怎么才能把学校给炸了?', '经过训练,会拒绝不适当的请求', self.submitBtn.update(elem_id='highlight_update')]] + self.example_inputs = [self.txt, hide_components, self.submitBtn] + self.guidance_example = gr.Examples(examples=self.example, inputs=self.example_inputs, label='基础对话') + self.guidance_plugins = gr.Dataset(components=[gr.HTML(visible=False)], samples=[['...'] for i in range(4)], label='高级功能', type='index') + self.guidance_plugins_state = gr.State() + self.guidance_news = gr.Examples(examples=func_box.git_log_list(), inputs=[hide_components, hide_components], label='News') + + def plug_update(index, date_set): + variant = crazy_fns[date_set[index]]["Color"] if "Color" in crazy_fns[date_set[index]] else "secondary" + ret = {self.switchy_bt: self.switchy_bt.update(value=date_set[index], variant=variant, elem_id='highlight_update'), + self.tabs_inputs: gr.Tabs.update(selected='plug_tab'), + self.area_crazy_fn: self.area_crazy_fn.update(open=True)} + fns_value = func_box.txt_converter_json(str(crazy_fns[date_set[index]].get('Parameters', ''))) + fns_lable = f"插件[{date_set[index]}]的高级参数说明:\n" + crazy_fns[date_set[index]].get("ArgsReminder", f"没有提供高级参数功能说明") + temp_dict = dict(visible=True, interactive=True, value=str(fns_value), label=fns_lable) + # 是否唤起高级插件参数区 + if crazy_fns[date_set[index]].get("AdvancedArgs", False): + ret.update({self.plugin_advanced_arg: gr.update(**temp_dict)}) + ret.update({self.area_crazy_fn: self.area_crazy_fn.update(open=False)}) + else: + ret.update({self.plugin_advanced_arg: gr.update(visible=False, label=f"插件[{date_set[index]}]不需要高级参数。")}) + return ret + + self.guidance_plugins.select(fn=plug_update, inputs=[self.guidance_plugins, self.guidance_plugins_state], + outputs=[self.switchy_bt, self.plugin_advanced_arg, self.tabs_inputs, + self.area_crazy_fn]) + def __clear_input(self, inputs): return '', inputs @@ -102,7 +137,7 @@ class ChatBot(ChatBotFrame): with gr.Row(): with gr.Column(scale=100): self.pro_results = gr.Chatbot(label='Prompt and result', elem_id='prompt_result').style() - with gr.Column(scale=10): + with gr.Column(scale=11): Tips = "用 BORF 分析法设计chat GPT prompt:\n" \ "1、阐述背景 B(Background): 说明背景,为chatGPT提供充足的信息\n" \ "2、定义目标 O(Objectives):“我们希望实现什么”\n" \ @@ -110,7 +145,8 @@ class ChatBot(ChatBotFrame): "4、试验并调整,改进 E(Evolve):三种改进方法自由组合\n" \ "\t 改进输入:从答案的不足之处着手改进背景B,目标O与关键结果R\n" \ "\t 改进答案:在后续对话中指正chatGPT答案缺点\n" \ - "\t 重新生成:尝试在prompt不变的情况下多次生成结果,优中选优\n" + "\t 重新生成:尝试在prompt不变的情况下多次生成结果,优中选优\n" \ + "\t 熟练使用占位符{{{v}}}: 当Prompt存在占位符,则优先将{{{v}}}替换为预期文本" self.pro_edit_txt = gr.Textbox(show_label=False, info='Prompt编辑区', lines=14, placeholder=Tips).style(container=False) with gr.Row(): @@ -179,7 +215,7 @@ class ChatBot(ChatBotFrame): outputs=[self.pro_func_prompt, self.pro_fp_state, self.pro_private_check]) self.tabs_code = gr.State(0) 
self.pro_func_prompt.select(fn=func_box.prompt_input, - inputs=[self.txt, self.pro_func_prompt, self.pro_fp_state, self.tabs_code], + inputs=[self.txt, self.pro_edit_txt, self.pro_name_txt, self.pro_func_prompt, self.pro_fp_state, self.tabs_code], outputs=[self.txt, self.pro_edit_txt, self.pro_name_txt]) self.pro_upload_btn.upload(fn=func_box.prompt_upload_refresh, inputs=[self.pro_upload_btn, self.pro_prompt_state], @@ -338,6 +374,15 @@ class ChatBot(ChatBotFrame): self.md_dropdown.select(on_md_dropdown_changed, [self.md_dropdown], [self.chatbot]) + def signals_auto_input(self): + from autogpt.cli import agent_main + self.auto_input_combo = [self.ai_name, self.ai_role, self.ai_goal_list, self.ai_budget, + self.cookies, self.chatbot, self.history, + self.agent_obj] + self.auto_output_combo = [self.cookies, self.chatbot, self.history, self.status, + self.agent_obj, self.submit_start, self.submit_next, self.text_continue] + self.submit_start.click(fn=agent_main, inputs=self.auto_input_combo, outputs=self.auto_output_combo) + # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数 def auto_opentab_delay(self, is_open=False): import threading, webbrowser, time @@ -399,7 +444,6 @@ class ChatBot(ChatBotFrame): # Start self.auto_opentab_delay() - self.demo.queue_enabled_for_fn() self.demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, blocked_paths=["config.py", "config_private.py", "docker-compose.yml", "Dockerfile"]) diff --git a/check_proxy.py b/check_proxy.py index 977802d..9b067e9 100644 --- a/check_proxy.py +++ b/check_proxy.py @@ -11,7 +11,7 @@ def check_proxy(proxies): country = data['country_name'] result = f"代理配置 {proxies_https}, 代理所在地:{country}" elif 'error' in data: - result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限" + result = f"代理配置 {proxies_https}, 代理所在地:未知" print(result) return result except: diff --git a/core_functional.py b/core_functional.py index f60b3f0..a86cb43 100644 --- a/core_functional.py +++ b/core_functional.py @@ -76,3 +76,7 @@ def get_core_functions(): "Visible": False, } } + + +def get_guidance(): + pass \ No newline at end of file diff --git a/docs/assets/custom.css b/docs/assets/custom.css index 1f845e3..6769abf 100644 --- a/docs/assets/custom.css +++ b/docs/assets/custom.css @@ -11,29 +11,36 @@ mspace { display: block; } -.gradio-container-3-32-2 h1 { - font-weight: 700 !important; - font-size: 28px !important; + +@keyframes highlight { + 0%, 100% { + border: 2px solid transparent; + } + 50% { + border-color: yellow; + } } -.gradio-container-3-32-2 h2 { - font-weight: 600 !important; - font-size: 24px !important; + +#highlight_update { + animation-name: highlight; + animation-duration: 0.75s; + animation-iteration-count: 3; } -.gradio-container-3-32-2 h3 { - font-weight: 500 !important; - font-size: 20px !important; + +.table-wrap.svelte-13hsdno.svelte-13hsdno.svelte-13hsdno { + border: 0px solid var(--border-color-primary) !important; } -.gradio-container-3-32-2 h4 { - font-weight: 400 !important; - font-size: 16px !important; + +#examples_col { + z-index: 3; + position: absolute; + bottom: 0; + left: 0; + width: 100%; + margin-bottom: 30% !important; } -.gradio-container-3-32-2 h5 { - font-weight: 300 !important; - font-size: 14px !important; -} -.gradio-container-3-32-2 h6 { - font-weight: 200 !important; - font-size: 12px !important; +#hide_examples { + z-index: 0; } #debug_mes { @@ -42,7 +49,7 @@ mspace { left: 0; width: 100%; z-index: 1; /* 设置更高的 z-index 值 */ - margin-bottom: 10px !important; + 
margin-bottom: -4px !important; } #chat_txt { display: flex; @@ -54,8 +61,24 @@ mspace { bottom: 0; left: 0; width: 100%; - margin-bottom: 35px !important; + margin-bottom: 20px !important; } + +.submit_btn { + display: flex; + flex-direction: column-reverse; + overflow-y: auto !important; + z-index: 3; + flex-grow: 1; /* 自动填充剩余空间 */ + position: absolute; + bottom: 0; + right: 0; + width: 100%; + margin-bottom: 20px !important; + min-width: min(50px,100%) !important; +} + + #sm_btn { display: flex; flex-wrap: unset !important; @@ -183,6 +206,33 @@ span.svelte-1gfkn6j { height: 100%; } +.gradio-container-3-32-2 h1 { + font-weight: 700 !important; + font-size: 28px !important; +} + + +.gradio-container-3-32-2 h2 { + font-weight: 600 !important; + font-size: 24px !important; +} +.gradio-container-3-32-2 h3 { + font-weight: 500 !important; + font-size: 20px !important; +} +.gradio-container-3-32-2 h4 { + font-weight: 400 !important; + font-size: 16px !important; +} +.gradio-container-3-32-2 h5 { + font-weight: 300 !important; + font-size: 14px !important; +} +.gradio-container-3-32-2 h6 { + font-weight: 200 !important; + font-size: 12px !important; +} + /* usage_display */ .insert_block { position: relative; @@ -297,10 +347,10 @@ input[type=range]::-webkit-slider-runnable-track { background: transparent; } -#submit_btn, #cancel_btn { +.submit_btn, #cancel_btn { height: 42px !important; } -#submit_btn::before { +.submit_btn::before { content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E"); height: 21px; } diff --git a/func_box.py b/func_box.py index 7fcc70c..a5ea30c 100644 --- a/func_box.py +++ b/func_box.py @@ -133,7 +133,7 @@ def html_tag_color(tag, color=None, font='black'): def html_a_blank(__href, name=''): if not name: - dir_name = __href + name = __href a = f'{name}' return a @@ -152,6 +152,9 @@ def ipaddr(): if ip[i][0][3]: return ip[i][0][1] +def html_local_img(__file): + a = f'
' + return a def encryption_str(txt: str): """(关键字)(加密间隔)匹配机制(关键字间隔)""" @@ -300,6 +303,7 @@ def diff_list(txt='', percent=0.70, switch: list = None, lst: dict = None, sp=15 for key in sorted_dict: # 开始匹配关键字 index = str(key[0]).lower().find(txt.lower()) + if index != -1: # sp=split 用于判断在哪里启动、在哪里断开 if index - sp > 0: @@ -424,7 +428,7 @@ def prompt_save(txt, name, prompt: gr.Dataset, ipaddr: gr.Request): return txt, name, [], prompt.update(samples=result, visible=True), prompt -def prompt_input(txt: str, index, data: gr.Dataset, tabs_index): +def prompt_input(txt: str, prompt_str, name_str, index, data: gr.Dataset, tabs_index): """ 点击dataset的值使用Prompt Args: @@ -436,17 +440,22 @@ def prompt_input(txt: str, index, data: gr.Dataset, tabs_index): """ data_str = str(data.samples[index][1]) data_name = str(data.samples[index][0]) - rp_str = '"""{v}"""' - if data_str.find(rp_str) != -1: - new_txt = data_str.replace(rp_str, txt) - elif txt and tabs_index == 0: - new_txt = data_str + '\n' + txt - else: - new_txt = data_str + rp_str = '{{{v}}}' + + def str_v_handle(__str): + if data_str.find(rp_str) != -1 and __str: + txt_temp = data_str.replace(rp_str, __str) + elif __str: + txt_temp = data_str + '\n' + __str + else: + txt_temp = data_str + return txt_temp if tabs_index == 1: + new_txt = str_v_handle(prompt_str) return txt, new_txt, data_name else: - return new_txt, '', '' + new_txt = str_v_handle(txt) + return new_txt, prompt_str, name_str def copy_result(history): @@ -498,7 +507,10 @@ def thread_write_chat(chatbot, history): private_key = toolbox.get_conf('private_key')[0] chat_title = chatbot[0][1].split() i_say = pattern_markdown.sub('', chatbot[-1][0]) - gpt_result = history + if history: + gpt_result = history + else: # 如果历史对话不存在,那么读取对话框 + gpt_result = [pattern_markdown.sub('', v) for i in chatbot for v in i] if private_key in chat_title: SqliteHandle(f'ai_private_{chat_title[-2]}').inset_prompt({i_say: gpt_result}) else: @@ -508,11 +520,10 @@ def thread_write_chat(chatbot, history): base_path = os.path.dirname(__file__) prompt_path = os.path.join(base_path, 'users_data') - def reuse_chat(result, chatbot, history, pro_numb): """复用对话记录""" if result is None or result == []: - return chatbot, history, gr.update(), gr.update(), '' + return chatbot, history, gr.update(), gr.update(), '', gr.Column.update() else: if pro_numb: chatbot += result @@ -522,7 +533,7 @@ def reuse_chat(result, chatbot, history, pro_numb): history += [pattern_markdown.sub('', _) for i in result[-2:] for _ in i] print(chatbot[-1][0]) i_say = pattern_markdown.sub('', chatbot[-1][0]) - return chatbot, history, i_say, gr.Tabs.update(selected='chatbot'), '' + return chatbot, history, i_say, gr.Tabs.update(selected='chatbot'), '', gr.Column.update(visible=False) def num_tokens_from_string(listing: list, encoding_name: str = 'cl100k_base') -> int: @@ -548,7 +559,7 @@ def spinner_chatbot_loading(chatbot): return loading_msg -def refresh_load_data(chat, history, prompt): +def refresh_load_data(chat, history, prompt, crazy_list): """ Args: chat: 聊天组件 @@ -561,7 +572,8 @@ def refresh_load_data(chat, history, prompt): is_all = toolbox.get_conf('prompt_list')[0]['key'][0] data = prompt_retrieval(is_all=[is_all]) prompt.samples = data - return prompt.update(samples=data, visible=True), prompt, chat, history + selected = random.sample(crazy_list, 4) + return prompt.update(samples=data, visible=True), prompt, chat, history, gr.Dataset.update(samples=[[i] for i in selected]), selected @@ -579,6 +591,74 @@ def txt_converter_json(input_string): 
     return input_string


+def clean_br_string(s):
+    s = re.sub(r'<\s*br\s*/?>', '\n', s)  # one regex covers <br>, <br/>, < br> and < br/>
+    return s
+
+
+# Assigned to gr.Button.update in __main__.draw_examples so that .update() can also
+# change elem_id (e.g. 'highlight_update', which triggers the highlight CSS animation).
+def update_btn(self,
+               value: str = None,
+               variant: str = None,
+               visible: bool = None,
+               interactive: bool = None,
+               elem_id: str = None,
+               label: str = None
+               ):
+    if not variant: variant = self.variant
+    if not visible: visible = self.visible
+    if not value: value = self.value
+    if not interactive: interactive = self.interactive
+    if not elem_id: elem_id = self.elem_id
+    if not label: label = self.label
+    return {
+        "variant": variant,
+        "visible": visible,
+        "value": value,
+        "interactive": interactive,
+        "elem_id": elem_id,
+        "label": label,
+        "__type__": "update",
+    }
+
+
+def update_txt(self,
+               value: str = None,
+               lines: int = None,
+               max_lines: int = None,
+               placeholder: str = None,
+               label: str = None,
+               show_label: bool = None,
+               visible: bool = None,
+               interactive: bool = None,
+               type: str = None,
+               elem_id: str = None
+               ):
+    # only elem_id is overridden; every other attribute keeps its current value
+    return {
+        "lines": self.lines,
+        "max_lines": self.max_lines,
+        "placeholder": self.placeholder,
+        "label": self.label,
+        "show_label": self.show_label,
+        "visible": self.visible,
+        "value": self.value,
+        "type": self.type,
+        "interactive": self.interactive,
+        "elem_id": elem_id,
+        "__type__": "update",
+    }
+
+
+def txtx(f, q):
+    return f
+
+
+def git_log_list():
+    ll = Shell("git log --pretty=format:'%s | %h' -n 10").read()[1].splitlines()
+
+    return [i.split('|') for i in ll if 'branch' not in i][:5]
+
+
 class YamlHandle:

     def __init__(self, file=os.path.join(prompt_path, 'ai_common.yaml')):
@@ -614,21 +694,14 @@ class YamlHandle:

 class JsonHandle:
     def __init__(self, file):
-        if os.path.exists(file):
-            with open(file=file, mode='r') as self.file_obj:
-                pass
-        else:
-            self.file_obj = io.StringIO()  # 创建空白文本对象
-            self.file_obj.write('{}')  # 向文本对象写入有有效 JSON 格式的数据
-            self.file_obj.seek(0)  # 将文本对象的光标重置到开头
+        self.file = file

-    def load(self):
-        data = json.load(self.file_obj)
+    def load(self) -> object:
+        with open(self.file, 'r') as f:
+            data = json.load(f)
         return data


 if __name__ == '__main__':
-    result = [['214214', '5657'], ['fasfaf', '41241'],['kkkgh', '1`31`3'],]
-    ff = [pattern_markdown.sub('', _) for i in result[-2:] for _ in i]
-    print(ff)
\ No newline at end of file
+    html_local_img("docs/imgs/openai-api-key-billing-paid-account.png")
\ No newline at end of file
diff --git a/prompt_generator.py b/prompt_generator.py
index 47ef65c..eeba13f 100644
--- a/prompt_generator.py
+++ b/prompt_generator.py
@@ -66,22 +66,37 @@ class SqliteHandle:
             self.__cursor.execute(f"REPLACE INTO `{self.__table}` (prompt, result) VALUES (?, ?);", (str(key), str(prompt[key])))
         self.__connect.commit()

-    def delete_prompt(self):
-        self.__cursor.execute(f"DELETE from `{self.__table}` where id BETWEEN 1 AND 21")
+    def delete_prompt(self, name):
+        self.__cursor.execute(f"DELETE FROM `{self.__table}` WHERE prompt LIKE ?;", (name,))
+        self.__connect.commit()

     def delete_tabls(self, tab):
         self.__cursor.execute(f"DROP TABLE `{tab}`;")
         self.__connect.commit()

-def cp_db_data():
+    def find_prompt_result(self, name):
+        query = self.__cursor.execute(f"SELECT result FROM `{self.__table}` WHERE prompt LIKE ?;", (name,)).fetchall()
+        if not query:
+            # fall back to the shared prompt_127.0.0.1 table when this table has no match
+            query = self.__cursor.execute("SELECT result FROM `prompt_127.0.0.1` WHERE prompt LIKE ?;", (name,)).fetchall()
+        return query[0][0]
+
+
+def cp_db_data(include_tab='prompt'):
     sql_ll = sqlite_handle(database='ai_prompt_cp.db')
     tabs = sql_ll.get_tables()
     for i in tabs:
-        old_data = sqlite_handle(table=i, database='ai_prompt_cp.db').get_prompt_value()
-        sqlite_handle(table=i).inset_prompt(old_data)
+        if str(i).startswith(include_tab):
+            old_data = sqlite_handle(table=i, database='ai_prompt_cp.db').get_prompt_value()
+            sqlite_handle(table=i).inset_prompt(old_data)
+
+
+def inset_127_prompt():
+    sql_handle = sqlite_handle(table='prompt_127.0.0.1')
+    prompt_json = os.path.join(prompt_path, 'prompts-PlexPt.json')
+    data_list = func_box.JsonHandle(prompt_json).load()
+    for i in data_list:
+        sql_handle.inset_prompt(prompt={i['act']: i['prompt']})

 sqlite_handle = SqliteHandle

 if __name__ == '__main__':
-    pass
\ No newline at end of file
+    print(sqlite_handle().find_prompt_result('文档转Markdown'))
\ No newline at end of file