diff --git a/func_box.py b/func_box.py
index f73b135..b6283cb 100644
--- a/func_box.py
+++ b/func_box.py
@@ -21,7 +21,7 @@ from contextlib import ExitStack
 import logging
 import yaml
 import requests
-
+import tiktoken
 logger = logging
 from sklearn.feature_extraction.text import CountVectorizer
 import numpy as np
@@ -532,6 +532,14 @@ def reuse_chat(result, chatbot, history, pro_numb):
         i_say = pattern.sub('', chatbot[-1][0])
     return chatbot, history, i_say, gr.Tabs.update(selected='chatbot'), ''
 
+@timeStatistics
+def num_tokens_from_string(listing: list, encoding_name: str = 'cl100k_base') -> int:
+    """Return the total number of tokens across a list of strings."""
+    encoding = tiktoken.get_encoding(encoding_name)
+    count_tokens = 0
+    for i in listing:
+        count_tokens += len(encoding.encode(i))
+    return count_tokens
 
 class YamlHandle:
@@ -583,4 +591,69 @@ class JsonHandle:
 
 
 if __name__ == '__main__':
-    pass
\ No newline at end of file
+    num = num_tokens_from_string([
+        """
+        You are jvs, 帮我做一篇关于chatgpt的分享,大纲为ChatGPT 的基本原理和应用、Prompt 的作用和优化、ChatGPT 和 Prompt 的案例研究、ChatGPT 和 Prompt 的挑战和限制、ChatGPT 和 Prompt 的未来发展
+Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications.
+GOALS:
+1. 在文后,将市面上的AI应用作比较,对每个部分进行适当调整和拓展
+Constraints: # 确定目标
+1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.
+2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
+3. No user assistance
+4. Exclusively use the commands listed below e.g. command_name
+Commands: # 可执行的行动
+1. analyze_code: Analyze Code, args: "code": ""
+2. execute_python_file: Execute Python File, args: "filename": ""
+3. execute_shell: Execute Shell Command, non-interactive commands only, args: "command_line": ""
+4. execute_shell_popen: Execute Shell Command, non-interactive commands only, args: "command_line": ""
+5. append_to_file: Append to file, args: "filename": "", "text": ""
+6. delete_file: Delete file, args: "filename": ""
+7. list_files: List Files in Directory, args: "directory": ""
+8. read_file: Read file, args: "filename": ""
+9. write_to_file: Write to file, args: "filename": "", "text": ""
+10. google: Google Search, args: "query": ""
+11. generate_image: Generate Image, args: "prompt": ""
+12. improve_code: Get Improved Code, args: "suggestions": "", "code": ""
+13. send_tweet: Send Tweet, args: "tweet_text": ""
+14. browse_website: Browse Website, args: "url": "", "question": ""
+15. write_tests: Write Tests, args: "code": "", "focus": ""
+16. delete_agent: Delete GPT Agent, args: "key": ""
+17. get_hyperlinks: Get text summary, args: "url": ""
+18. get_text_summary: Get text summary, args: "url": "", "question": ""
+19. list_agents: List GPT Agents, args: () -> str
+20. message_agent: Message GPT Agent, args: "key": "", "message": ""
+21. start_agent: Start GPT Agent, args: "name": "", "task": "", "prompt": ""
+22. task_complete: Task Complete (Shutdown), args: "reason": ""
+Resources: # 可用资源
+1. Internet access for searches and information gathering.
+2. Long Term memory management.
+3. GPT-3.5 powered Agents for delegation of simple tasks.
+4. File output.
+Performance Evaluation: # 自我评估
+1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
+2. Constructively self-criticize your big-picture behavior constantly.
+3. Reflect on past decisions and strategies to refine your approach.
+4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.
+5. Write all code to a file. 
+You should only respond in JSON format as described below
+Response Format: # 限制GPT的回答
+{
+    "thoughts": {
+        "text": "thought",
+        "reasoning": "reasoning",
+        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
+        "criticism": "constructive self-criticism",
+        "speak": "thoughts summary to say to user"
+    },
+    "command": {
+        "name": "command name",
+        "args": {
+            "arg name": "value"
+        }
+    }
+}
+Ensure the response can be parsed by Python json.loads
+    """
+    ])
+    print(num)
\ No newline at end of file
diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py
index 2f0e376..4e28e69 100644
--- a/request_llm/bridge_chatgpt.py
+++ b/request_llm/bridge_chatgpt.py
@@ -200,7 +200,6 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                     status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}\t" \
                                   f"本次对话耗时: {func_box.html_tag_color(tag=f'{count_time}s')}"
                     yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
-
                 except Exception as e:
                     traceback.print_exc()
                     yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
@@ -229,6 +228,9 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                     chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
                     yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
                     return
+    count_tokens = func_box.num_tokens_from_string(listing=history)
+    status_text += f'\t 本次对话使用tokens: {func_box.html_tag_color(count_tokens)}'
+    yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
 
 def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     """