diff --git a/Dockerfile b/Dockerfile
index 19d988f..97ad13d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -10,12 +10,16 @@ RUN echo '[global]' > /etc/pip.conf && \

 WORKDIR /gpt

-# 装载项目文件
-COPY . .
+
+# 安装依赖
+COPY requirements.txt ./
+COPY ./docs/gradio-3.32.2-py3-none-any.whl ./docs/gradio-3.32.2-py3-none-any.whl
+RUN pip3 install -r requirements.txt
+# 装载项目文件
+COPY . .
 RUN pip3 install -r requirements.txt
-
 # 可选步骤,用于预热模块
 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
diff --git a/README.md b/README.md
index 02f047d..39b37ea 100644
--- a/README.md
+++ b/README.md
@@ -43,10 +43,11 @@ chat分析报告生成 | [函数插件] 运行后自动生成总结汇报
 [Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [函数插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
 [谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [函数插件] 给定任意谷歌学术搜索页面URL,让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
 互联网信息聚合+GPT | [函数插件] 一键[让GPT先从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck),再回答问题,让信息永不过时
+⭐Arxiv论文精细翻译 | [函数插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),迄今为止最好的论文翻译工具⭐
 公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮
 多线程函数插件支持 | 支持多线调用chatgpt,一键处理[海量文本](https://www.bilibili.com/video/BV1FT411H7c5/)或程序
 启动暗色gradio[主题](https://github.com/binary-husky/chatgpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
-[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持,[API2D](https://api2d.com/)接口支持 | 同时被GPT3.5、GPT4、[清华ChatGLM](https://github.com/THUDM/ChatGLM-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧?
+[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM](https://github.com/THUDM/ChatGLM-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧?
 更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama),[RWKV](https://github.com/BlinkDL/ChatRWKV)和[盘古α](https://openi.org.cn/pangu/)
 更多新功能展示(图像生成等) …… | 见本文档结尾处 ……
@@ -227,38 +228,33 @@ docker-compose up
 1. 对话保存功能。在函数插件区调用 `保存当前的对话` 即可将当前对话保存为可读+可复原的html文件,另外在函数插件区(下拉菜单)调用 `载入对话历史存档` ,即可还原之前的会话。
-Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史html存档缓存,点击 `删除所有本地对话历史记录` 可以删除所有html存档缓存。
+Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史html存档缓存。
+                f.write(f'<p style="color:red;">#{show_html}#</p>')
+            else:
+                f.write(f'<p style="color:black;">{show_html}</p>')
+            node = node.next
+            if node is None: break
+
+    for n in nodes: n.next = None # break
+    return_dict['nodes'] = nodes
+    return_dict['segment_parts_for_gpt'] = segment_parts_for_gpt
+    return return_dict
+
+
 class LatexPaperSplit():
     """
-    将Latex文档分解到一个链表中,每个链表节点用preserve的标志位提示它是否应当被GPT处理
+    break down latex file to a linked list,
+    each node uses a preserve flag to indicate whether it should
+    be processed by GPT.
     """
     def __init__(self) -> None:
-        """
-        root是链表的根节点
-        """
-        self.root = None
+        self.nodes = None
+        self.msg = "{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \
+            "版权归原文作者所有。翻译内容可靠性无任何保障,请仔细鉴别并以原文为准。" + \
+            "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
+        # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加README中的QQ联系开发者)
+        self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"

     def merge_result(self, arr, mode, msg):
         """
-        将GPT处理后的结果融合
+        Merge the result after the GPT process completed
         """
         result_string = ""
-        node = self.root
         p = 0
-        while True:
+        for node in self.nodes:
             if node.preserve:
                 result_string += node.string
             else:
                 result_string += fix_content(arr[p], node.string)
                 p += 1
-            node = node.next
-            if node is None: break
         if mode == 'translate_zh':
-            try:
-                pattern = re.compile(r'\\begin\{abstract\}.*\n')
-                match = pattern.search(result_string)
-                position = match.end()
-                result_string = result_string[:position] + \
-                    "{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成,其内容可靠性没有任何保障,请仔细鉴别并以原文为准。" + \
-                    "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。" + \
-                    msg + \
-                    "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\" + \
-                    result_string[position:]
-            except:
-                pass
+            pattern = re.compile(r'\\begin\{abstract\}.*\n')
+            match = pattern.search(result_string)
+            position = match.end()
+            result_string = result_string[:position] + self.msg + msg + self.msg_declare + result_string[position:]
         return result_string
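For readers skimming the patch, the preserve-flag merge that `merge_result` implements above can be sketched as standalone Python. The `Node` class below is an illustrative stand-in for the project's `LinkedListNode`, not its real API:

```python
# Sketch: reassemble the document, keeping preserved nodes verbatim and
# splicing GPT output, in order, into the nodes flagged for processing.
from dataclasses import dataclass

@dataclass
class Node:
    string: str
    preserve: bool

def merge(nodes, gpt_results):
    out, p = "", 0
    for node in nodes:
        if node.preserve:
            out += node.string        # original LaTeX kept as-is
        else:
            out += gpt_results[p]     # the real code also applies fix_content() here
            p += 1
    return out

nodes = [Node(r"\section{Intro}", True), Node("Original paragraph.", False)]
print(merge(nodes, ["Translated paragraph."]))
```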

-    def split(self, txt, project_folder):
+    def split(self, txt, project_folder, opts):
         """
-        将Latex文档分解到一个链表中,每个链表节点用preserve的标志位提示它是否应当被GPT处理
+        break down latex file to a linked list,
+        each node uses a preserve flag to indicate whether it should
+        be processed by GPT.
+        P.S. use multiprocessing to avoid timeout error
         """
-        root = LinkedListNode(txt, False)
-        def split_worker(root, pattern, flags=0):
-            lt = root
-            cnt = 0
-            pattern_compile = re.compile(pattern, flags)
-            while True:
-                if not lt.preserve:
-                    while True:
-                        res = pattern_compile.search(lt.string)
-                        if not res: break
-                        before = res.string[:res.span()[0]]
-                        this = res.group(0)
-                        after = res.string[res.span()[1]:]
-                        # ======
-                        lt.string = before
-                        tmp = lt.next
-                        # ======
-                        mid = LinkedListNode(this, True)
-                        lt.next = mid
-                        # ======
-                        aft = LinkedListNode(after, False)
-                        mid.next = aft
-                        aft.next = tmp
-                        # ======
-                        lt = aft
-                lt = lt.next
-                cnt += 1
-                # print(cnt)
-                if lt is None: break
-
-        def split_worker_begin_end(root, pattern, flags=0, limit_n_lines=25):
-            lt = root
-            cnt = 0
-            pattern_compile = re.compile(pattern, flags)
-            while True:
-                if not lt.preserve:
-                    while True:
-                        target_string = lt.string
-
-                        def search_with_line_limit(target_string):
-                            for res in pattern_compile.finditer(target_string):
-                                cmd = res.group(1)   # begin{what}
-                                this = res.group(2)  # content between begin and end
-                                white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof', 'em', 'emph', 'textit', 'textbf']
-                                if cmd in white_list or this.count('\n') > 25:
-                                    sub_res = search_with_line_limit(this)
-                                    if not sub_res: continue
-                                    else: return sub_res
-                                else:
-                                    return res.group(0)
-                            return False
-                        # ======
-                        # search for first encounter of \begin \end pair with less than 25 lines in the middle
-                        ps = search_with_line_limit(target_string)
-                        if not ps: break
-                        res = re.search(re.escape(ps), target_string, flags)
-                        if not res: assert False
-                        before = res.string[:res.span()[0]]
-                        this = res.group(0)
-                        after = res.string[res.span()[1]:]
-                        # ======
-                        lt.string = before
-                        tmp = lt.next
-                        # ======
-                        mid = LinkedListNode(this, True)
-                        lt.next = mid
-                        # ======
-                        aft = LinkedListNode(after, False)
-                        mid.next = aft
-                        aft.next = tmp
-                        # ======
-                        lt = aft
-                lt = lt.next
-                cnt += 1
-                # print(cnt)
-                if lt is None: break
-
-
-        # root 是链表的头
-        print('正在分解Latex源文件,构建链表结构')
-        # 删除iffalse注释
-        split_worker(root, r"\\iffalse(.*?)\\fi", re.DOTALL)
-        # 吸收在25行以内的begin-end组合
-        split_worker_begin_end(root, r"\\begin\{([a-z\*]*)\}(.*?)\\end\{\1\}", re.DOTALL, limit_n_lines=25)
-        # 吸收匿名公式
-        split_worker(root, r"\$\$(.*?)\$\$", re.DOTALL)
-        # 吸收其他杂项
-        split_worker(root, r"(.*?)\\maketitle", re.DOTALL)
-        split_worker(root, r"\\section\{(.*?)\}")
-        split_worker(root, r"\\section\*\{(.*?)\}")
-        split_worker(root, r"\\subsection\{(.*?)\}")
-        split_worker(root, r"\\subsubsection\{(.*?)\}")
-        split_worker(root, r"\\bibliography\{(.*?)\}")
-        split_worker(root, r"\\bibliographystyle\{(.*?)\}")
-        split_worker(root, r"\\begin\{lstlisting\}(.*?)\\end\{lstlisting\}", re.DOTALL)
-        split_worker(root, r"\\begin\{wraptable\}(.*?)\\end\{wraptable\}", re.DOTALL)
-        split_worker(root, r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}", re.DOTALL)
-        split_worker(root, r"\\begin\{wrapfigure\}(.*?)\\end\{wrapfigure\}", re.DOTALL)
-        split_worker(root, r"\\begin\{wrapfigure\*\}(.*?)\\end\{wrapfigure\*\}", re.DOTALL)
-        split_worker(root, r"\\begin\{figure\}(.*?)\\end\{figure\}", re.DOTALL)
-        split_worker(root, r"\\begin\{figure\*\}(.*?)\\end\{figure\*\}", re.DOTALL)
-        split_worker(root, r"\\begin\{multline\}(.*?)\\end\{multline\}", re.DOTALL)
-        split_worker(root, r"\\begin\{multline\*\}(.*?)\\end\{multline\*\}", re.DOTALL)
-        split_worker(root, r"\\begin\{table\}(.*?)\\end\{table\}", re.DOTALL)
-        split_worker(root, r"\\begin\{table\*\}(.*?)\\end\{table\*\}", re.DOTALL)
-        split_worker(root, r"\\begin\{minipage\}(.*?)\\end\{minipage\}", re.DOTALL)
-        split_worker(root, r"\\begin\{minipage\*\}(.*?)\\end\{minipage\*\}", re.DOTALL)
-        split_worker(root, r"\\begin\{align\*\}(.*?)\\end\{align\*\}", re.DOTALL)
-        split_worker(root, r"\\begin\{align\}(.*?)\\end\{align\}", re.DOTALL)
-        split_worker(root, r"\\begin\{equation\}(.*?)\\end\{equation\}", re.DOTALL)
-        split_worker(root, r"\\begin\{equation\*\}(.*?)\\end\{equation\*\}", re.DOTALL)
-        split_worker(root, r"\\item ")
-        split_worker(root, r"\\label\{(.*?)\}")
-        split_worker(root, r"\\begin\{(.*?)\}")
-        split_worker(root, r"\\vspace\{(.*?)\}")
-        split_worker(root, r"\\hspace\{(.*?)\}")
-        split_worker(root, r"\\end\{(.*?)\}")
-
-        node = root
-        while True:
-            if len(node.string.strip('\n').strip(''))==0: node.preserve = True
-            if len(node.string.strip('\n').strip(''))<50: node.preserve = True
-            node = node.next
-            if node is None: break
-
-        # 修复括号
-        node = root
-        while True:
-            string = node.string
-            if node.preserve:
-                node = node.next
-                if node is None: break
-                continue
-            def break_check(string):
-                str_stack = [""] # (lv, index)
-                for i, c in enumerate(string):
-                    if c == '{':
-                        str_stack.append('{')
-                    elif c == '}':
-                        if len(str_stack) == 1:
-                            print('stack kill')
-                            return i
-                        str_stack.pop(-1)
-                    else:
-                        str_stack[-1] += c
-                return -1
-            bp = break_check(string)
-
-            if bp == -1:
-                pass
-            elif bp == 0:
-                node.string = string[:1]
-                q = LinkedListNode(string[1:], False)
-                q.next = node.next
-                node.next = q
-            else:
-                node.string = string[:bp]
-                q = LinkedListNode(string[bp:], False)
-                q.next = node.next
-                node.next = q
-
-            node = node.next
-            if node is None: break
-
-        node = root
-        while True:
-            if len(node.string.strip('\n').strip(''))==0: node.preserve = True
-            if len(node.string.strip('\n').strip(''))<50: node.preserve = True
-            node = node.next
-            if node is None: break
-
-        # 将前后断行符脱离
-        node = root
-        prev_node = None
-        while True:
-            if not node.preserve:
-                lstriped_ = node.string.lstrip().lstrip('\n')
-                if (prev_node is not None) and (prev_node.preserve) and (len(lstriped_)!=len(node.string)):
-                    prev_node.string += node.string[:-len(lstriped_)]
-                    node.string = lstriped_
-                rstriped_ = node.string.rstrip().rstrip('\n')
-                if (node.next is not None) and (node.next.preserve) and (len(rstriped_)!=len(node.string)):
-                    node.next.string = node.string[len(rstriped_):] + node.next.string
-                    node.string = rstriped_
-            # =====
-            prev_node = node
-            node = node.next
-            if node is None: break
-
-        # 将分解结果返回 res_to_t
-        with open(pj(project_folder, 'debug_log.html'), 'w', encoding='utf8') as f:
-            res_to_t = []
-            node = root
-            while True:
-                show_html = node.string.replace('\n','<br/>')
-                if not node.preserve:
-                    res_to_t.append(node.string)
-                    f.write(f'<p style="color:red;">#{show_html}#</p>')
-                else:
-                    f.write(f'<p style="color:black;">{show_html}</p>')
-                node = node.next
-                if node is None: break
-
-        self.root = root
-        self.sp = res_to_t
+        import multiprocessing
+        manager = multiprocessing.Manager()
+        return_dict = manager.dict()
+        p = multiprocessing.Process(
+            target=split_subprocess,
+            args=(txt, project_folder, return_dict, opts))
+        p.start()
+        p.join()
+        self.nodes = return_dict['nodes']
+        self.sp = return_dict['segment_parts_for_gpt']
         return self.sp
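The replacement body above moves the slow splitting work into a child process and reads the result back through a `multiprocessing.Manager` dict. A self-contained sketch of that pattern, with a stand-in worker instead of the project's `split_subprocess`:

```python
# Sketch: isolate heavy work in a child process; collect output via a Manager dict.
import multiprocessing

def worker(txt, return_dict):
    # stand-in for split_subprocess: any long-running computation
    return_dict['segments'] = txt.split('\n\n')

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    p = multiprocessing.Process(target=worker, args=("para one\n\npara two", return_dict))
    p.start()
    p.join()  # join(timeout=...) would additionally bound the wait
    print(return_dict['segments'])
```

Objects passed back through the manager dict must be picklable, which is plausibly why `split_subprocess` severs each node's `next` pointer (`for n in nodes: n.next = None`) before returning.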
+
+
 class LatexPaperFileGroup():
+    """
+    use tokenizer to break down text according to max_token_limit
+    """
     def __init__(self):
         self.file_paths = []
         self.file_contents = []
@@ -371,7 +462,7 @@ class LatexPaperFileGroup():

     def run_file_split(self, max_token_limit=1900):
         """
-        将长文本分离开来
+        use tokenizer to break down text according to max_token_limit
         """
         for index, file_content in enumerate(self.file_contents):
             if self.get_token_num(file_content) < max_token_limit:
@@ -402,7 +493,7 @@ class LatexPaperFileGroup():



-def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, mode='proofread', switch_prompt=None):
+def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, mode='proofread', switch_prompt=None, opts=[]):
     import time, os, re
     from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
     from .latex_utils import LatexPaperFileGroup, merge_tex_files, LatexPaperSplit, 寻找Latex主文件
@@ -411,7 +502,7 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
     maintex = 寻找Latex主文件(file_manifest, mode)
     chatbot.append((f"定位主Latex文件", f'[Local Message] 分析结果:该项目的Latex主文件是{maintex}, 如果分析错误, 请立即终止程序, 删除或修改歧义文件, 然后重试。主程序即将开始, 请稍候。'))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-    time.sleep(5)
+    time.sleep(3)

     # <-------- 读取Latex文件, 将多文件tex工程融合为一个巨型tex ---------->
     main_tex_basename = os.path.basename(maintex)
@@ -431,8 +522,10 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
         f.write(merged_content)

     # <-------- 精细切分latex文件 ---------->
+    chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。'))
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
     lps = LatexPaperSplit()
-    res = lps.split(merged_content, project_folder)
+    res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数

     # <-------- 拆分过长的latex片段 ---------->
     pfg = LatexPaperFileGroup()
@@ -480,7 +573,8 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
     msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}。"
     final_tex = lps.merge_result(pfg.file_result, mode, msg)
     with open(project_folder + f'/merge_{mode}.tex', 'w', encoding='utf-8', errors='replace') as f:
-        f.write(final_tex)
+        if mode != 'translate_zh' or "binary" in final_tex: f.write(final_tex)
+

     # <-------- 整理结果, 退出 ---------->
     chatbot.append((f"完成了吗?", 'GPT结果已输出, 正在编译PDF'))
@@ -507,7 +601,8 @@ def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work
         f.writelines(file_lines)
         return True, f"{tex_name_pure}_fix_{n_fix}", buggy_lines
     except:
-        return False, 0, [0]
+        print("Fatal error occurred, but we cannot identify error, please download zip, read latex log, and compile manually.")
+        return False, -1, [-1]


 def compile_latex_with_timeout(command, timeout=60):
@@ -522,12 +617,12 @@ def compile_latex_with_timeout(command, timeout=60):
         return False
     return True
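Only the tail of `compile_latex_with_timeout` (its `return False`/`return True` branches) is visible in this hunk. A sketch of the general pattern such a helper implements, assuming standard `subprocess` semantics rather than the project's exact code:

```python
# Sketch: run a compiler command, kill it if it exceeds the time budget.
import subprocess

def compile_with_timeout(command, cwd=None, timeout=60):
    process = subprocess.Popen(command, shell=True, cwd=cwd,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        process.kill()
        process.communicate()  # reap the killed process
        return False           # mirrors the `return False` branch above
    return True                # mirrors the trailing `return True`

# e.g. compile_with_timeout('pdflatex -interaction=batchmode main.tex', cwd='workdir/')
```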
-def 编译Latex差别(chatbot, history, main_file_original, main_file_modified, work_folder_original, work_folder_modified, work_folder):
+def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_folder_original, work_folder_modified, work_folder):
     import os, time
     current_dir = os.getcwd()
     n_fix = 1
     max_try = 32
-    chatbot.append([f"正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder},如果程序停顿5分钟以上,则大概率是卡死在Latex里面了。不幸卡死时请直接去该路径下取回翻译结果,或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history)
+    chatbot.append([f"正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder},如果程序停顿5分钟以上,请直接去该路径下取回翻译结果,或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history)
     chatbot.append([f"正在编译PDF文档", '...']); yield from update_ui(chatbot=chatbot, history=history); time.sleep(1); chatbot[-1] = list(chatbot[-1]) # 刷新界面
     yield from update_ui_lastest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面
diff --git a/crazy_functions/数学动画生成manim.py b/crazy_functions/数学动画生成manim.py
index 5851b9c..26e61b1 100644
--- a/crazy_functions/数学动画生成manim.py
+++ b/crazy_functions/数学动画生成manim.py
@@ -8,7 +8,7 @@ def inspect_dependency(chatbot, history):
         import manim
         return True
     except:
-        chatbot.append(["导入依赖失败", "使用该模块需要额外依赖,安装方法:```pip install manimgl```"])
+        chatbot.append(["导入依赖失败", "使用该模块需要额外依赖,安装方法:```pip install manim manimgl```"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return False
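The manim hunk above only corrects the install hint; the surrounding `inspect_dependency` guard is a common optional-dependency probe. A generic sketch of the idea, without the project's chatbot/UI plumbing:

```python
# Sketch: probe an optional dependency and surface an install hint on failure.
import importlib

def inspect_dependency(module_name: str, install_hint: str) -> bool:
    try:
        importlib.import_module(module_name)
        return True
    except ImportError:
        print(f"Missing optional dependency '{module_name}'. Install with: {install_hint}")
        return False

inspect_dependency("manim", "pip install manim manimgl")
```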
diff --git a/docs/Dockerfile+NoLocal+Latex b/docs/Dockerfile+NoLocal+Latex
new file mode 100644
index 0000000..0f9ac8a
--- /dev/null
+++ b/docs/Dockerfile+NoLocal+Latex
@@ -0,0 +1,27 @@
+# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM
+# - 1 修改 `config.py`
+# - 2 构建 docker build -t gpt-academic-nolocal-latex -f docs/Dockerfile+NoLocal+Latex .
+# - 3 运行 docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex
+
+FROM fuqingxu/python311_texlive_ctex:latest
+
+# 指定路径
+WORKDIR /gpt
+
+ARG useProxyNetwork=''
+
+RUN $useProxyNetwork pip3 install gradio openai numpy arxiv rich -i https://pypi.douban.com/simple/
+RUN $useProxyNetwork pip3 install colorama Markdown pygments pymupdf -i https://pypi.douban.com/simple/
+
+# 装载项目文件
+COPY . .
+
+
+# 安装依赖
+RUN $useProxyNetwork pip3 install -r requirements.txt -i https://pypi.douban.com/simple/
+
+# 可选步骤,用于预热模块
+RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
+
+# 启动
+CMD ["python3", "-u", "main.py"]
diff --git a/docs/translate_english.json b/docs/translate_english.json
index b7a582d..13b0869 100644
--- a/docs/translate_english.json
+++ b/docs/translate_english.json
@@ -58,6 +58,8 @@
     "连接网络回答问题": "ConnectToNetworkToAnswerQuestions",
     "联网的ChatGPT": "ChatGPTConnectedToNetwork",
     "解析任意code项目": "ParseAnyCodeProject",
+    "读取知识库作答": "ReadKnowledgeArchiveAnswerQuestions",
+    "知识库问答": "UpdateKnowledgeArchive",
     "同时问询_指定模型": "InquireSimultaneously_SpecifiedModel",
     "图片生成": "ImageGeneration",
     "test_解析ipynb文件": "Test_ParseIpynbFile",
diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index efafa39..195c030 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -85,6 +85,15 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+
+    "gpt-3.5-turbo-16k": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 1024*16,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },

     "gpt-4": {
         "fn_with_ui": chatgpt_ui,
diff --git a/toolbox.py b/toolbox.py
index 861051d..98a280e 100644
--- a/toolbox.py
+++ b/toolbox.py
@@ -562,7 +562,9 @@ def on_report_generated(files, chatbot):
     if len(report_files) == 0:
         return None, chatbot
     # files.extend(report_files)
-    chatbot.append(['报告如何远程获取?', '报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。'])
+    file_links = ''
+    for f in report_files: file_links += f'