Compare commits

...

7 Commits

Author SHA1 Message Date
fb22f716ff 3d interp 2023-07-25 10:09:50 +08:00
a8093b9dd8 多线程约束越大执行时间 2023-07-22 03:05:04 +08:00
4f55dfdc0e easy 2023-07-22 02:25:30 +08:00
505b10965f CodeInterpreter 2023-07-22 01:48:24 +08:00
a393edfaa4 ALLOW CUSTOM API KEY PATTERN 2023-07-21 22:49:07 +08:00
dd7a01cda5 Merge pull request #976 from fenglui/master
fix msg.data.split(DELIMITER) exception when msg.data is int
2023-07-21 17:02:29 +08:00
00a3b91f95 fix msg.data.split(DELIMITER) exception when msg.data is int 2023-07-21 03:51:33 +08:00
11 changed files with 488 additions and 105 deletions

1
.gitignore vendored
View File

@ -151,3 +151,4 @@ multi-language
request_llm/moss
media
flagged
objdump.tmp

View File

@ -136,4 +136,8 @@ ALIYUN_APPKEY="" # 例如 RoPlZrM88DnAFkZK
# Claude API KEY
ANTHROPIC_API_KEY = ""
ANTHROPIC_API_KEY = ""
# 自定义API KEY格式
CUSTOM_API_KEY_PATTERN = ""

View File

@ -432,18 +432,18 @@ def get_crazy_functions():
except:
print('Load function plugin failed')
# try:
# from crazy_functions.虚空终端 import 终端
# function_plugins.update({
# "超级终端": {
# "Color": "stop",
# "AsButton": False,
# # "AdvancedArgs": True,
# # "ArgsReminder": "",
# "Function": HotReload(终端)
# }
# })
# except:
# print('Load function plugin failed')
try:
from crazy_functions.虚空终端CodeInterpreter import 虚空终端CodeInterpreter
function_plugins.update({
"虚空终端CodeInterpreter": {
"Color": "stop",
"AsButton": True,
# "AdvancedArgs": True,
# "ArgsReminder": "",
"Function": HotReload(虚空终端CodeInterpreter)
}
})
except:
print('Load function plugin failed')
return function_plugins

View File

@ -228,6 +228,22 @@ def test_chatglm_finetune():
for cookies, cb, hist, msg in (启动微调)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
cli_printer.print(cb)
def test_虚空终端CodeInterpreter():
    """Drive the CodeInterpreter plugin once with a csv-to-excel request, printing every UI update."""
    from crazy_functions.虚空终端CodeInterpreter import 虚空终端CodeInterpreter
    request = 'Convert this dataset to excel.'
    # NOTE(review): the plugin indexes recently_uploaded_files[-1]; a plain str here
    # yields its last character rather than a path — confirm whether a list is expected.
    plugin_kwargs = {"recently_uploaded_files": "build/assets/iris.csv"}
    for cookies, cb, hist, msg in 虚空终端CodeInterpreter(request, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb)
def test_解析项目源代码炫酷版():
    """Run the fancy project-analysis plugin over the current directory, printing chat updates."""
    from crazy_functions.解析项目源代码炫酷版 import 解析一个Python项目炫酷版
    project_path = './'
    plugin_entry = 解析一个Python项目炫酷版
    for cookies, cb, hist, msg in plugin_entry(project_path, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb)
if __name__ == "__main__":
# test_解析一个Python项目()
@ -243,7 +259,9 @@ if __name__ == "__main__":
# test_数学动画生成manim()
# test_Langchain知识库()
# test_Langchain知识库读取()
test_Latex()
# test_Latex()
# test_chatglm_finetune()
# test_虚空终端CodeInterpreter()
test_解析项目源代码炫酷版()
input("程序完成,回车退出。")
print("退出。")

View File

@ -141,7 +141,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
chatbot, history_array, sys_prompt_array,
refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
handle_token_exceed=True, show_user_at_complete=False,
retry_times_at_unknown_error=2,
retry_times_at_unknown_error=2, callback_fn=None
):
"""
Request GPT model using multiple threads with UI and high efficiency
@ -166,6 +166,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
handle_token_exceed是否自动处理token溢出的情况如果选择自动处理则会在溢出时暴力截断默认开启
show_user_at_complete (bool, optional): (在结束时,把完整输入-输出结果显示在聊天框)
retry_times_at_unknown_error子任务失败时的重试次数
callback_fn: 当信息更新时,在主进程调用的回调函数
输出 Returns:
list: List of GPT model responses 每个子任务的输出汇总如果某个子任务出错response中会携带traceback报错信息方便调试和定位问题。
@ -283,6 +284,9 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
# 在前端打印些好玩的东西
chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
# 回调函数
if callback_fn is not None: callback_fn([mutable[thread_index][0] for thread_index in range(len(futures))])
# 结束了吗?
if all(worker_done):
executor.shutdown()
break

View File

@ -1,87 +1,70 @@
from toolbox import CatchException, update_ui, gen_time_str
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import input_clipping
import copy, json
prompt = """
I have to achieve some functionalities by calling one of the functions below.
Your job is to find the correct funtion to use to satisfy my requirement,
and then write python code to call this function with correct parameters.
These are functions you are allowed to choose from:
1.
功能描述: 总结音视频内容
调用函数: ConcludeAudioContent(txt, llm_kwargs)
参数说明:
txt: 音频文件的路径
llm_kwargs: 模型参数, 永远给定None
2.
功能描述: 将每次对话记录写入Markdown格式的文件中
调用函数: WriteMarkdown()
3.
功能描述: 将指定目录下的PDF文件从英文翻译成中文
调用函数: BatchTranslatePDFDocuments_MultiThreaded(txt, llm_kwargs)
参数说明:
txt: PDF文件所在的路径
llm_kwargs: 模型参数, 永远给定None
4.
功能描述: 根据文本使用GPT模型生成相应的图像
调用函数: ImageGeneration(txt, llm_kwargs)
参数说明:
txt: 图像生成所用到的提示文本
llm_kwargs: 模型参数, 永远给定None
5.
功能描述: 对输入的word文档进行摘要生成
调用函数: SummarizingWordDocuments(input_path, output_path)
参数说明:
input_path: 待处理的word文档路径
output_path: 摘要生成后的文档路径
You should always anwser with following format:
----------------
Code:
```
class AutoAcademic(object):
def __init__(self):
self.selected_function = "FILL_CORRECT_FUNCTION_HERE" # e.g., "GenerateImage"
self.txt = "FILL_MAIN_PARAMETER_HERE" # e.g., "荷叶上的蜻蜓"
self.llm_kwargs = None
```
Explanation:
只有GenerateImage和生成图像相关, 因此选择GenerateImage函数。
----------------
Now, this is my requirement:
"""
def get_fn_lib():
    """
    Registry of plugins the void-terminal can dispatch to.

    Returns:
        dict: maps an English function name (exposed to the LLM via the
        function-calling API) to its metadata:
            module:            import path of the plugin module
            function:          attribute name of the plugin entry point
            description:       natural-language summary shown to the LLM
            arg_1_description: description of the single string argument

    Note: the merged diff carried leftover tuple-style entries with the same
    keys; they were dead (overwritten by these dict entries) and are removed.
    """
    return {
        "BatchTranslatePDFDocuments_MultiThreaded": {
            "module": "crazy_functions.批量翻译PDF文档_多线程",
            "function": "批量翻译PDF文档",
            "description": "Translate PDF Documents",
            "arg_1_description": "A path containing pdf files.",
        },
        "SummarizingWordDocuments": {
            "module": "crazy_functions.总结word文档",
            "function": "总结word文档",
            "description": "Summarize Word Documents",
            "arg_1_description": "A path containing Word files.",
        },
        "ImageGeneration": {
            "module": "crazy_functions.图片生成",
            "function": "图片生成",
            "description": "Generate a image that satisfies some description.",
            "arg_1_description": "Descriptions about the image to be generated.",
        },
        "TranslateMarkdownFromEnglishToChinese": {
            "module": "crazy_functions.批量Markdown翻译",
            "function": "Markdown中译英",
            "description": "Translate Markdown Documents from English to Chinese.",
            "arg_1_description": "A path containing Markdown files.",
        },
        "SummaryAudioVideo": {
            "module": "crazy_functions.总结音视频",
            "function": "总结音视频",
            "description": "Get text from a piece of audio and summarize this audio.",
            "arg_1_description": "A path containing audio files.",
        },
    }
# Build the OpenAI function-calling schema list from the plugin registry:
# one entry per plugin, each taking a single string parameter `plugin_arg_1`.
functions = []
for fn_name, fn_meta in get_fn_lib().items():
    functions.append({
        "name": fn_name,
        "description": fn_meta['description'],
        "parameters": {
            "type": "object",
            "properties": {
                "plugin_arg_1": {
                    "type": "string",
                    "description": fn_meta['arg_1_description'],
                },
            },
            "required": ["plugin_arg_1"],
        },
    })
def inspect_dependency(chatbot, history):
    """Dependency probe for this plugin; nothing extra is required, so always report ready."""
    return True
def eval_code(code, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    Execute the plugin selected by the LLM (generator; forwards UI updates).

    NOTE(review): this block appears to contain BOTH the pre-refactor path
    (write generated code to gpt_log/void_terminal_runtime.py and import an
    AutoAcademic object from it) AND the post-refactor path (treat `code` as an
    OpenAI function_call dict with 'name'/'arguments') merged together by the
    diff view — the two cannot coexist at runtime; confirm against the repo.
    """
    import subprocess, sys, os, shutil, importlib
    # legacy path: persist the generated source so it can be imported below
    with open('gpt_log/void_terminal_runtime.py', 'w', encoding='utf8') as f:
        f.write(code)
    import importlib
    try:
        # legacy path: load the AutoAcademic config object emitted by the LLM
        AutoAcademic = getattr(importlib.import_module('gpt_log.void_terminal_runtime', 'AutoAcademic'), 'AutoAcademic')
        # importlib.reload(AutoAcademic)
        auto_dict = AutoAcademic()
        selected_function = auto_dict.selected_function
        txt = auto_dict.txt
        fp, fn = get_fn_lib()[selected_function]
        # new path: `code` is a function_call dict; look up module and entry point
        tmp = get_fn_lib()[code['name']]
        fp, fn = tmp['module'], tmp['function']
        fn_plugin = getattr(importlib.import_module(fp, fn), fn)
        yield from fn_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
        # new path: the single plugin argument comes from the JSON `arguments`
        arg = json.loads(code['arguments'])['plugin_arg_1']
        yield from fn_plugin(arg, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
    except:
        # on any failure, surface the traceback in the chat window
        from toolbox import trimmed_format_exc
        chatbot.append(["执行错误", f"\n```\n{trimmed_format_exc()}\n```\n"])
@ -110,22 +93,27 @@ def 终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_
history = []
# 基本信息:功能、贡献者
chatbot.append(["函数插件功能?", "根据自然语言执行插件命令, 作者: binary-husky, 插件初始化中 ..."])
chatbot.append(["虚空终端插件功能?", "根据自然语言的描述, 执行任意插件命令."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# # 尝试导入依赖, 如果缺少依赖, 则给出安装建议
# dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history) # 刷新界面
# if not dep_ok: return
# 输入
i_say = prompt + txt
i_say = txt
# 开始
llm_kwargs_function_call = copy.deepcopy(llm_kwargs)
llm_kwargs_function_call['llm_model'] = 'gpt-call-fn' # 修改调用函数
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=txt,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
sys_prompt=""
llm_kwargs=llm_kwargs_function_call, chatbot=chatbot, history=[],
sys_prompt=functions
)
# 将代码转为动画
code = get_code_block(gpt_say)
yield from eval_code(code, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
res = json.loads(gpt_say)['choices'][0]
if res['finish_reason'] == 'function_call':
code = json.loads(gpt_say)['choices'][0]
yield from eval_code(code['message']['function_call'], llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
else:
chatbot.append(["无法调用相关功能", res])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

View File

@ -0,0 +1,213 @@
from collections.abc import Callable, Iterable, Mapping
from typing import Any
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, promote_file_to_downloadzone, clear_file_downloadzone
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import input_clipping, try_install_deps
from multiprocessing import Process, Pipe
import os
templete = """
```python
import ... # Put dependencies here, e.g. import numpy as np
class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction`
def run(self, path): # The name of the function must be `run`, it takes only a positional argument.
# rewrite the function you have just written here
...
return generated_file_path
```
"""
def inspect_dependency(chatbot, history):
    """Check plugin dependencies (currently nothing to verify); refreshes the UI and reports ready."""
    yield from update_ui(chatbot=chatbot, history=history) # refresh UI
    return True
def get_code_block(reply):
    """
    Extract the Python code block from an LLM reply.

    Args:
        reply (str): full model reply, expected to contain a ```-fenced block.

    Returns:
        str: the code inside the fence, with a leading 'python' language tag removed.

    Raises:
        RuntimeError: if no suitable code block is present.
    """
    import re

    def _drop_language_tag(block):
        # BUG FIX: the previous implementation used block.strip('python'), which
        # removes ANY leading/trailing characters from the set {p,y,t,h,o,n} and
        # can truncate real code (e.g. a trailing identifier ending in 'n').
        # Remove only an explicit leading language tag instead.
        return block[len('python'):] if block.startswith('python') else block

    pattern = r"```([\s\S]*?)```"  # non-greedy match of fenced code blocks
    matches = re.findall(pattern, reply)
    if len(matches) == 1:
        return _drop_language_tag(matches[0])
    # several fences: pick the one holding the expected class definition
    for match in matches:
        if 'class TerminalFunction' in match:
            return _drop_language_tag(match)
    raise RuntimeError("GPT is not generating proper code.")
def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
    """
    Two-round conversation with the LLM: first ask for a single Python function
    solving the user's task on a `file_type` file, then ask for it to be
    rewritten into `templete` so the result can be imported and executed.

    Returns:
        tuple: (code_to_return, installation_advance, txt, file_type,
                llm_kwargs, chatbot, history) — installation_advance is
                currently always "" (step 3 is disabled below).
    """
    # compose the task prompt
    prompt_compose = [
        f'Your job:\n'
        f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n',
        f"2. You should write this function to perform following task: " + txt + "\n",
        f"3. Wrap the output python function with markdown codeblock."
    ]
    i_say = "".join(prompt_compose)
    demo = []
    # step 1: ask for a first draft of the function
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=i_say,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo,
        sys_prompt= r"You are a programmer."
    )
    history.extend([i_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history) # refresh UI
    # step 2: ask the model to pour the draft into the executable template
    prompt_compose = [
        "If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n",
        templete
    ]
    i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. "
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=inputs_show_user,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
        sys_prompt= r"You are a programmer."
    )
    code_to_return = gpt_say
    history.extend([i_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history) # refresh UI
    # # step 3 (disabled): ask for dependency-installation advice via try_install_deps
    # i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them."
    # i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`'
    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #     inputs=i_say, inputs_show_user=inputs_show_user,
    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
    #     sys_prompt= r"You are a programmer."
    # )
    # # # step 3 (disabled): pip variant of the same advice
    # i_say = "Show me how to use `pip` to install packages to run the code above. "
    # i_say += 'For instance. `pip install -r opencv-python scipy numpy`'
    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #     inputs=i_say, inputs_show_user=i_say,
    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
    #     sys_prompt= r"You are a programmer."
    # )
    installation_advance = ""
    return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history
def make_module(code):
    """
    Persist *code* as a timestamped module under gpt_log/ and return a
    'module.path->ClassName' locator for init_module_instance().
    """
    import re
    module_file = 'gpt_fn_' + gen_time_str().replace('-', '_')
    with open(f'gpt_log/{module_file}.py', 'w', encoding='utf8') as f:
        f.write(code)
    # pull the class name straight out of the source text
    class_name = re.search(r'class (\w+)\(', code).group(1)
    return f"gpt_log.{module_file}->{class_name}"
def init_module_instance(module):
    """Instantiate the class referenced by a 'module.path->ClassName' locator."""
    from importlib import import_module
    module_path, class_name = module.split('->')
    cls = getattr(import_module(module_path), class_name)
    return cls()
def for_immediate_show_off_when_possible(file_type, fp, chatbot):
    """If the produced file is a previewable image, append an inline preview message; returns chatbot."""
    previewable = ('png', 'jpg')
    if file_type in previewable:
        image_path = os.path.abspath(fp)
        preview_msg = (
            f'本地文件地址: <br/>`{image_path}`<br/>'
            f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
        )
        chatbot.append(['这是一张图片, 展示如下:', preview_msg])
    return chatbot
def subprocess_worker(instance, file_path, return_dict):
    """Child-process target: run the generated TerminalFunction and report its result via the shared dict."""
    result = instance.run(file_path)
    return_dict['result'] = result
@CatchException
def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    Open-source CodeInterpreter plugin entry point (generator driven by the UI).

    Args:
        txt: text typed by the user, e.g. a request to apply to an uploaded file
        llm_kwargs: model parameters (temperature, top_p, ...), passed through
        plugin_kwargs: plugin parameters; may carry "recently_uploaded_files"
        chatbot: chat display handle, mutated to show progress
        history: chat history (cleared here to avoid token overflow)
        system_prompt: silent system prompt for the model
        web_port: port the app is running on
    """
    # clear history to avoid input overflow; also reset the download zone
    history = []; clear_file_downloadzone(chatbot)
    # banner: feature description / author
    chatbot.append([
        "函数插件功能?",
        "CodeInterpreter开源版, 此插件处于开发阶段, 建议暂时不要使用, 作者: binary-husky, 插件初始化中 ..."
    ])
    yield from update_ui(chatbot=chatbot, history=history) # refresh UI
    # probe dependencies; bail out with installation advice if missing
    dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history) # refresh UI
    if not dep_ok: return
    # locate the most recently uploaded file
    if ("recently_uploaded_files" in plugin_kwargs) and (plugin_kwargs["recently_uploaded_files"] == ""): plugin_kwargs.pop("recently_uploaded_files")
    # NOTE(review): if nothing was uploaded, recently_uploaded_files is None and
    # the next line raises; also if a str (not list) is stored, [-1] yields the
    # last character — confirm the caller always stores a non-empty list.
    recently_uploaded_files = plugin_kwargs.get("recently_uploaded_files", None)
    file_path = recently_uploaded_files[-1]
    file_type = file_path.split('.')[-1]
    # carelessness check: the user pasted an upload path instead of a request
    if 'private_upload' in txt:
        chatbot.append([
            "...",
            f"请在输入框内填写需求,然后再次点击该插件(文件路径 {file_path} 已经被记忆)"
        ])
        yield from update_ui(chatbot=chatbot, history=history) # refresh UI
        return
    # main job: ask the LLM for code, up to 5 attempts
    for j in range(5):
        try:
            code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \
                yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history)
            code = get_code_block(code)
            res = make_module(code)
            instance = init_module_instance(res)
            break
        except Exception as e:
            # NOTE(review): if all 5 attempts fail, `instance` stays unbound and
            # the execution block below raises NameError (caught there) — confirm intended.
            chatbot.append([f"{j}次代码生成尝试,失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"])
            yield from update_ui(chatbot=chatbot, history=history) # refresh UI
    # code generation done; execute it in a sandbox child process
    try:
        import multiprocessing
        manager = multiprocessing.Manager()
        return_dict = manager.dict()
        p = multiprocessing.Process(target=subprocess_worker, args=(instance, file_path, return_dict))
        # only has 10 seconds to run
        p.start(); p.join(timeout=10)
        if p.is_alive(): p.terminate(); p.join()
        p.close()
        res = return_dict['result']
        # res = instance.run(file_path)
    except Exception as e:
        chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"])
        # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance])
        yield from update_ui(chatbot=chatbot, history=history) # refresh UI
        return
    # success: show the result (file goes to the download zone, strings inline)
    res = str(res)
    if os.path.exists(res):
        chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res])
        new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot)
        chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot)
        yield from update_ui(chatbot=chatbot, history=history) # refresh UI
    else:
        chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res])
        yield from update_ui(chatbot=chatbot, history=history) # refresh UI
"""
测试:
裁剪图像,保留下半部分
交换图像的蓝色通道和红色通道
将图像转为灰度图像
将csv文件转excel表格
"""

View File

@ -0,0 +1,137 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from toolbox import objdump, objload
from .crazy_utils import input_clipping
class ThreeJSPlot():
    """Bridge object that feeds analysis results to a 3D rendering interface."""
    def __init__(self) -> None:
        # manifest of files to visualize; populated by read_files()
        self.files = None
    def read_files(self, files):
        # remember the list of files to render
        self.files = files
    def launch_render_interface(self):
        # NOTE(review): requires the third-party `vhmap` package; the import is
        # deferred so the module still loads when vhmap is absent.
        from vhmap.mcom import mcom
        self.visual_bridge = mcom()
def 解析源代码炫酷版(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    """
    Analyse every file in *file_manifest* — one LLM request per file, dispatched
    on many threads — streaming progress into *chatbot* (generator driven by the UI).
    Intermediate replies are checkpointed to disk via the callback below.
    """
    import os, copy
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
    from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
    msg = '正常'
    summary_batch_isolation = True  # NOTE(review): assigned but never read in this block — confirm intent
    inputs_array = []
    inputs_show_user_array = []
    history_array = []
    sys_prompt_array = []
    report_part_1 = []
    assert len(file_manifest) <= 512, "源文件太多超过512个, 请缩减输入文件的数量。或者您也可以选择删除此行警告并修改代码拆分file_manifest列表从而实现分批次处理。"
    ############################## <step 1: per-file analysis, multi-threaded> ##################################
    for index, fp in enumerate(file_manifest):
        # read the file (decode errors replaced so one bad file cannot abort the batch)
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
        prefix = "接下来请你逐文件分析下面的工程" if index==0 else ""
        i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
        # queue the request for this file
        inputs_array.append(i_say)
        inputs_show_user_array.append(i_say_show_user)
        history_array.append([])
        sys_prompt_array.append("你是一个程序架构分析师,正在分析一个源代码项目。你的回答必须简单明了。")
    def callback_when_intel_update(gpt_reply_array):
        # invoked in the main process whenever any worker produces new output;
        # checkpoints the partial replies to disk via toolbox.objdump
        objdump((gpt_reply_array, file_manifest))
        return
    # all inputs prepared: fan out one request thread per source file
    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array = inputs_array,
        inputs_show_user_array = inputs_show_user_array,
        history_array = history_array,
        sys_prompt_array = sys_prompt_array,
        llm_kwargs = llm_kwargs,
        chatbot = chatbot,
        show_user_at_complete = True,
        callback_fn=callback_when_intel_update,
    )
    # per-file analysis done: persist results and prepare for the project summary
    report_part_1 = copy.deepcopy(gpt_response_collection)
    history_to_return = report_part_1
    res = write_results_to_file(report_part_1)
    chatbot.append(("完成?", "逐个文件分析已完成。" + res + "\n\n正在开始汇总。"))
    yield from update_ui(chatbot=chatbot, history=history_to_return) # refresh UI
    ############################## <step 2: project-wide summary — not yet implemented> ##################################
    ############################## <END> ##################################
    history_to_return.extend([])
    res = write_results_to_file(history_to_return)
    chatbot.append(("完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history_to_return) # refresh UI
@CatchException
def 解析一个Python项目炫酷版(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """Entry point: analyse every .py file under the project folder given in *txt* (fancy version)."""
    import glob, os
    history = []  # drop previous history to avoid token overflow
    # guard: the input must be an existing local path
    if not os.path.exists(txt):
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh UI
        return
    project_folder = txt
    file_manifest = list(glob.glob(f'{project_folder}/**/*.py', recursive=True))
    if not file_manifest:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh UI
        return
    yield from 解析源代码炫酷版(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@CatchException
def 解析任意code项目炫酷版(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    Entry point: analyse an arbitrary-language project (fancy version).

    plugin_kwargs["advanced_arg"] carries a comma-separated pattern string,
    e.g. "*.c, *.cpp, ^*.md, ^README.md": plain patterns select files,
    "^*.suffix" excludes a suffix, "^name" excludes an exact file name.
    """
    txt_pattern = plugin_kwargs.get("advanced_arg")
    txt_pattern = txt_pattern.replace("", ",")  # tolerate full-width commas
    # patterns to include (e.g.: *.c, *.cpp, *.py, config.toml)
    pattern_include = [_.lstrip(" ,").rstrip(" ,") for _ in txt_pattern.split(",") if _ != "" and not _.strip().startswith("^")]
    if not pattern_include: pattern_include = ["*"]  # empty input -> match everything
    # file suffixes to exclude (e.g.: ^*.c, ^*.cpp, ^*.py)
    pattern_except_suffix = [_.lstrip(" ^*.,").rstrip(" ,") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^*.")]
    pattern_except_suffix += ['zip', 'rar', '7z', 'tar', 'gz']  # never parse archives
    # exact file names to exclude (e.g.: ^README.md); raw string avoids an invalid-escape warning
    pattern_except_name = [_.lstrip(" ^*,").rstrip(" ,").replace(".", r"\.") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^") and not _.strip().startswith("^*.")]
    # build the exclusion regex
    pattern_except = r'/[^/]+\.(' + "|".join(pattern_except_suffix) + ')$'
    pattern_except += '|/(' + "|".join(pattern_except_name) + ')$' if pattern_except_name != [] else ''
    history.clear()
    import glob, os, re
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh UI
        return
    # if an archive was uploaded, descend into the extracted folder so the archive itself is skipped
    maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)]
    if len(maybe_dir) > 0 and maybe_dir[0].endswith('.extract'):
        extract_folder_path = maybe_dir[0]
    else:
        extract_folder_path = project_folder
    # collect files matching the include patterns, minus the exclusions
    file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \
        os.path.isfile(f) and (not re.search(pattern_except, f) or pattern.endswith('.' + re.search(pattern_except, f).group().split('.')[-1]))]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh UI
        return
    # BUG FIX: the original called 解析源代码新, which is not defined in this
    # module; dispatch to this module's 解析源代码炫酷版 instead.
    yield from 解析源代码炫酷版(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

View File

@ -160,7 +160,7 @@ def main():
click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
cancel_handles.append(click_handle)
# 文件上传区接收文件后与chatbot的互动
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2])
file_upload.upload(on_file_uploaded, [cookies, file_upload, chatbot, txt, txt2, checkboxes], [cookies, chatbot, txt, txt2])
# 函数插件-固定按钮区
for k in crazy_fns:
if not crazy_fns[k].get("AsButton", True): continue

View File

@ -519,7 +519,11 @@ class _ChatHub:
resp_txt_no_link = ""
while not final:
msg = await self.wss.receive()
objects = msg.data.split(DELIMITER)
try:
objects = msg.data.split(DELIMITER)
except :
continue
for obj in objects:
if obj is None or not obj:
continue

View File

@ -60,6 +60,9 @@ def ArgsGeneralWrapper(f):
plugin_kwargs = {
"advanced_arg": plugin_advanced_arg,
}
if "recently_uploaded_files" in cookies:
plugin_kwargs.update({"recently_uploaded_files": cookies["recently_uploaded_files"]})
chatbot_with_cookie = ChatBotWithCookies(cookies)
chatbot_with_cookie.write_list(chatbot)
if cookies.get('lock_plugin', None) is None:
@ -462,6 +465,10 @@ def find_recent_files(directory):
return recent_files
def clear_file_downloadzone(chatbot):
    """Empty the 'file_to_promote' cookie so the download zone shows no stale files; no-op without a chatbot."""
    if not chatbot:
        return
    chatbot._cookies.update({'file_to_promote': []})
def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
# 将文件复制一份到下载区
import shutil
@ -476,13 +483,14 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
if 'file_to_promote' in chatbot._cookies: current = chatbot._cookies['file_to_promote']
else: current = []
chatbot._cookies.update({'file_to_promote': [new_path] + current})
return new_path
def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
def on_file_uploaded(cookies, files, chatbot, txt, txt2, checkboxes):
"""
当文件被上传时的回调函数
"""
if len(files) == 0:
return chatbot, txt
return cookies, chatbot, txt
import shutil
import os
import time
@ -512,7 +520,8 @@ def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
f'[Local Message] 收到以下文件: \n\n{moved_files_str}' +
f'\n\n调用路径参数已自动修正到: \n\n{txt}' +
f'\n\n现在您点击任意“红颜色”标识的函数插件时,以上文件将被作为输入参数'+err_msg])
return chatbot, txt, txt2
cookies.update({"recently_uploaded_files": moved_files})
return cookies, chatbot, txt, txt2
def on_report_generated(cookies, files, chatbot):
@ -538,7 +547,11 @@ def load_chat_cookies():
return {'api_key': API_KEY, 'llm_model': LLM_MODEL}
def is_openai_api_key(key):
    """
    Check whether *key* looks like a valid OpenAI api-key.

    If CUSTOM_API_KEY_PATTERN is configured (non-empty), that regex takes
    precedence; otherwise fall back to the standard 'sk-' + 48 chars format.
    (A dead duplicate `re.match` line left over from the merged diff is removed.)
    """
    CUSTOM_API_KEY_PATTERN, = get_conf('CUSTOM_API_KEY_PATTERN')
    if len(CUSTOM_API_KEY_PATTERN) != 0:
        API_MATCH_ORIGINAL = re.match(CUSTOM_API_KEY_PATTERN, key)
    else:
        API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$", key)
    return bool(API_MATCH_ORIGINAL)
def is_azure_api_key(key):
@ -594,7 +607,7 @@ def select_api_key(keys, llm_model):
if is_azure_api_key(k): avail_key_list.append(k)
if len(avail_key_list) == 0:
raise RuntimeError(f"您提供的api-key不满足要求不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源右下角更换模型菜单中可切换openai,azureapi2d请求源")
raise RuntimeError(f"您提供的api-key不满足要求不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源右下角更换模型菜单中可切换openai,azure,claude,api2d请求源)")
api_key = random.choice(avail_key_list) # 随机负载均衡
return api_key
@ -670,12 +683,12 @@ def read_single_conf_with_lru_cache(arg):
# 在读取API_KEY时检查一下是不是忘了改config
if arg == 'API_KEY':
print亮蓝(f"[API_KEY] 本项目现已支持OpenAI和API2D的api-key。也支持同时填写多个api-key如API_KEY=\"openai-key1,openai-key2,api2d-key3\"")
print亮蓝(f"[API_KEY] 本项目现已支持OpenAI和Azure的api-key。也支持同时填写多个api-key如API_KEY=\"openai-key1,openai-key2,azure-key3\"")
print亮蓝(f"[API_KEY] 您既可以在config.py中修改api-key(s)也可以在问题输入区输入临时的api-key(s),然后回车键提交后即可生效。")
if is_any_api_key(r):
print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
else:
print亮红( "[API_KEY] 正确的 API_KEY 'sk'开头的51位密钥OpenAI或者 'fk'开头的41位密钥请在config文件中修改API密钥之后再运行。")
print亮红( "[API_KEY] 的 API_KEY 不满足任何一种已知的密钥格式请在config文件中修改API密钥之后再运行。")
if arg == 'proxies':
if r is None:
print亮红('[PROXY] 网络代理状态未配置。无代理状态下很可能无法访问OpenAI家族的模型。建议检查USE_PROXY选项是否修改。')
@ -685,6 +698,7 @@ def read_single_conf_with_lru_cache(arg):
return r
@lru_cache(maxsize=128)
def get_conf(*args):
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
res = []