Compare commits

..

172 Commits

SHA1 Message Date
37172906ef Fix file export bug 2023-06-29 14:55:55 +08:00
3b78e0538b Fix image display issue in the plugin demo 2023-06-29 14:52:58 +08:00
d8f9ac71d0 Merge pull request #907 from Xminry/master
feat: web search feature, cn.bing.com version, usable in mainland China
2023-06-29 12:44:32 +08:00
aced272d3c Fine-tune plugin prompts 2023-06-29 12:43:50 +08:00
aff77a086d Merge branch 'master' of https://github.com/Xminry/gpt_academic into Xminry-master 2023-06-29 12:38:43 +08:00
49253c4dc6 [arxiv trans] add html comparison to zip file 2023-06-29 12:29:49 +08:00
1a00093015 Fix prompts 2023-06-29 12:15:52 +08:00
64f76e7401 3.42 2023-06-29 11:32:19 +08:00
eb4c07997e Fix issues with LaTeX error correction and local LaTeX paper translation 2023-06-29 11:30:42 +08:00
99cf7205c3 feat: web search feature, cn.bing.com version, usable in mainland China 2023-06-28 10:30:08 +08:00
d684b4cdb3 Merge pull request #905 from Xminry/master
Update 理解PDF文档内容.py
2023-06-27 23:37:25 +08:00
601a95c948 Merge pull request #881 from OverKit/master
update latex_utils.py
2023-06-27 19:20:17 +08:00
e18bef2e9c add item breaker 2023-06-27 19:16:05 +08:00
f654c1af31 merge regex expressions 2023-06-27 18:59:56 +08:00
e90048a671 Merge branch 'master' of https://github.com/OverKit/gpt_academic into OverKit-master 2023-06-27 16:14:12 +08:00
ea624b1510 Merge pull request #889 from dackdawn/master
Add declaration for the 0613 models
2023-06-27 15:03:15 +08:00
057e3dda3c Merge branch 'master' of https://github.com/dackdawn/gpt_academic into dackdawn-master 2023-06-27 15:02:22 +08:00
4290821a50 Update 理解PDF文档内容.py 2023-06-27 01:57:31 +08:00
280e14d7b7 Update docker-compose for the LaTeX module 2023-06-26 09:59:14 +08:00
9f0cf9fb2b arxiv PDF citations 2023-06-25 23:30:31 +08:00
b8560b7510 Fix bug that misidentified LaTeX template files 2023-06-25 22:46:16 +08:00
d841d13b04 add arxiv translation test samples 2023-06-25 22:12:44 +08:00
efda9e5193 Merge pull request #897 from Ranhuiryan/master
Add the azure-gpt35 option
2023-06-24 17:59:51 +10:00
33d2e75aac add azure-gpt35 to model list 2023-06-21 16:19:49 +08:00
74941170aa update azure use instruction 2023-06-21 16:19:26 +08:00
cd38949903 Roll back to the original text when an error occurs 2023-06-21 11:53:57 +10:00
d87f1eb171 Update instructions for connecting to Azure 2023-06-21 11:38:59 +10:00
cd1e4e1ba7 Merge pull request #797 from XiaojianTang/master
Add support for the Azure OpenAI API
2023-06-21 11:23:41 +10:00
cf5f348d70 update test samples 2023-06-21 11:20:31 +10:00
0ee25f475e Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-06-20 23:07:51 +08:00
1fede6df7f temp 2023-06-20 23:05:17 +08:00
22a65cd163 Create build-with-latex.yml 2023-06-21 00:55:24 +10:00
538b041ea3 Merge pull request #890 from Mcskiller/master
Update README.md
2023-06-21 00:53:26 +10:00
d7b056576d add latex docker-compose 2023-06-21 00:52:58 +10:00
cb0bb6ab4a fix minor bugs 2023-06-21 00:41:33 +10:00
bf955aaf12 fix bugs 2023-06-20 23:12:30 +10:00
61eb0da861 fix encoding bug 2023-06-20 22:08:09 +10:00
5da633d94d Update README.md
Fix the incorrect URL for the git clone.
2023-06-20 19:10:11 +08:00
f3e4e26e2f Add declaration for the 0613 models
OpenAI limits gpt-3.5-turbo to 3 RPM, while gpt-3.5-turbo-0613 allows 60 RPM; although the two models behave identically, selecting the specific model grants higher RPM and TPM
2023-06-19 21:40:26 +08:00
af7734dd35 avoid file fusion 2023-06-19 16:57:11 +10:00
d5bab093f9 rename function names 2023-06-19 15:17:33 +10:00
f94b167dc2 Merge branch 'master' into overkit-master 2023-06-19 14:53:51 +10:00
951d5ec758 Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-06-19 14:52:25 +10:00
016d8ee156 Merge remote-tracking branch 'origin/master' into OverKit-master 2023-06-19 14:51:59 +10:00
dca9ec4bae Merge branch 'master' of https://github.com/OverKit/gpt_academic into OverKit-master 2023-06-19 14:49:50 +10:00
a06e43c96b Update README.md 2023-06-18 16:15:37 +08:00
29c6bfb6cb Update README.md 2023-06-18 16:12:06 +08:00
8d7ee975a0 Update README.md 2023-06-18 16:10:45 +08:00
4bafbb3562 Update Latex输出PDF结果.py 2023-06-18 15:54:23 +08:00
7fdf0a8e51 Adjust the code that differentiates content 2023-06-18 15:51:29 +08:00
2bb13b4677 Update README.md 2023-06-18 15:44:42 +08:00
9a5a509dd9 Fix the search for abstract 2023-06-17 19:27:21 +08:00
cbcb98ef6a Merge pull request #872 from Skyzayre/master
Update README.md
2023-06-16 17:54:39 +08:00
bb864c6313 Add some prompt text 2023-06-16 17:33:19 +08:00
6d849eeb12 Fix bug in the Langchain plugin 2023-06-16 17:33:03 +08:00
ef752838b0 Update README.md 2023-06-15 02:07:43 +08:00
73d4a1ff4b Update README.md 2023-06-14 10:15:47 +08:00
8c62f21aa6 3.41: add support for gpt-3.5-16k 2023-06-14 09:57:09 +08:00
c40ebfc21f Add gpt-3.5-16k to the supported model list 2023-06-14 09:50:15 +08:00
c365ea9f57 Update README.md 2023-06-13 16:13:19 +08:00
12d66777cc Merge pull request #864 from OverKit/master
Check for the letter % after removing leading spaces or tabs
2023-06-12 15:21:35 +08:00
9ac3d0d65d Check for the letter % after removing leading spaces or tabs 2023-06-12 10:09:52 +08:00
9fd212652e Declare specialized terminology 2023-06-12 09:45:59 +08:00
790a1cf12a Add some hints 2023-06-11 20:12:25 +08:00
3ecf2977a8 Fix caption translation 2023-06-11 18:23:54 +08:00
aeddf6b461 Update Latex输出PDF结果.py 2023-06-11 10:20:49 +08:00
ce0d8b9dab Prototype of the 虚空终端 (Void Terminal) plugin 2023-06-11 01:36:23 +08:00
3c00e7a143 file link in chatbot 2023-06-10 21:45:38 +08:00
ef1bfdd60f update pip install notice 2023-06-08 21:29:10 +08:00
e48d92e82e update translation 2023-06-08 18:34:06 +08:00
110510997f Update README.md 2023-06-08 12:48:52 +08:00
b52695845e Update README.md 2023-06-08 12:44:05 +08:00
f30c9c6d3b Update README.md 2023-06-08 12:43:13 +08:00
ff5403eac6 Update README.md 2023-06-08 12:42:24 +08:00
f9226d92be Update version 2023-06-08 12:24:14 +08:00
a0ea5d0e9e Update README.md 2023-06-08 12:22:03 +08:00
ce6f11d200 Update README.md 2023-06-08 12:20:49 +08:00
10b3001dba Update README.md 2023-06-08 12:19:11 +08:00
e2de1d76ea Update README.md 2023-06-08 12:18:31 +08:00
77cc141a82 Update README.md 2023-06-08 12:14:02 +08:00
526b4d8ecd Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-06-07 11:09:20 +08:00
149db621ec langchain check depends 2023-06-07 11:09:12 +08:00
2e1bb7311c Merge pull request #848 from MengDanzz/master
Split the Dockerfile COPY into two stages to cache dependency libraries, so rebuilding does not reinstall them
2023-06-07 10:44:09 +08:00
dae65fd2c2 Run pip install once more after COPY .. to check for dependency changes 2023-06-07 10:43:45 +08:00
9aafb2ee47 Add non-PyPI packages to COPY 2023-06-07 09:18:57 +08:00
6bc91bd02e Merge branch 'binary-husky:master' into master 2023-06-07 09:15:44 +08:00
8ef7344101 fix subprocess bug in Windows 2023-06-06 18:57:52 +08:00
40da1b0afe Run the LaTeX decomposition routine in a subprocess 2023-06-06 18:44:00 +08:00
c65def90f3 Split the Dockerfile COPY into two stages to cache dependency libraries, so rebuilding does not reinstall them 2023-06-06 14:36:30 +08:00
ddeaf76422 check latex in PATH 2023-06-06 00:23:00 +08:00
f23b66dec2 update Dockerfile with Latex 2023-06-05 23:49:54 +08:00
a26b294817 Write Some Docstring 2023-06-05 23:44:59 +08:00
66018840da declare resp 2023-06-05 23:24:41 +08:00
cea2144f34 fix test samples 2023-06-05 23:11:21 +08:00
7f5be93c1d Fix some regex matching bugs 2023-06-05 22:57:39 +08:00
85b838b302 add Linux support 2023-06-04 23:06:35 +08:00
27f97ba92a remove previous results 2023-06-04 16:55:36 +08:00
14269eba98 Set up a local arxiv cache 2023-06-04 16:08:01 +08:00
d5c9bc9f0a Raise the search priority of iffalse 2023-06-04 14:15:59 +08:00
b0fed3edfc consider iffalse state 2023-06-04 14:06:02 +08:00
7296d054a2 patch latex segmentation 2023-06-04 13:56:15 +08:00
d57c7d352d improve quality 2023-06-03 23:54:30 +08:00
3fd2927ea3 Improvements 2023-06-03 23:33:45 +08:00
b745074160 avoid most compile failure 2023-06-03 23:33:32 +08:00
70ee810133 improve success rate 2023-06-03 19:39:19 +08:00
68fea9e79b fix test 2023-06-03 18:09:39 +08:00
f82bf91aa8 test example 2023-06-03 18:06:39 +08:00
dde9edcc0c fix a fatal mistake 2023-06-03 17:49:22 +08:00
66c78e459e Fix prompts 2023-06-03 17:18:38 +08:00
de54102303 Revise reminders 2023-06-03 16:43:26 +08:00
7c7d2d8a84 Patch for LaTeX minipage 2023-06-03 16:16:32 +08:00
834f989ed4 Handle the case where input is used without the .tex extension 2023-06-03 15:42:22 +08:00
b658ee6e04 Fix several issues with arxiv translation 2023-06-03 15:36:55 +08:00
1a60280ea0 Add warnings 2023-06-03 14:40:37 +08:00
991cb7d272 warning 2023-06-03 14:39:40 +08:00
463991cfb2 fix bug 2023-06-03 14:24:06 +08:00
06f10b5fdc fix zh cite bug 2023-06-03 14:17:58 +08:00
d275d012c6 Merge branch 'langchain' into master 2023-06-03 13:53:39 +08:00
c5d1ea3e21 update langchain version 2023-06-03 13:53:34 +08:00
0022b92404 update prompt 2023-06-03 13:50:39 +08:00
ef61221241 latex auto translation milestone 2023-06-03 13:46:40 +08:00
5a1831db98 Success! 2023-06-03 00:34:23 +08:00
a643f8b0db debug translation 2023-06-02 23:06:01 +08:00
601712fd0a latex toolchain 2023-06-02 21:44:11 +08:00
e769f831c7 latex 2023-06-02 14:07:04 +08:00
dcd952671f Update main.py 2023-06-01 15:56:52 +08:00
06564df038 Merge branch 'langchain' 2023-06-01 09:39:34 +08:00
2f037f30d5 Temporarily remove plugin locking 2023-06-01 09:39:00 +08:00
efedab186d Merge branch 'master' into langchain 2023-06-01 00:10:22 +08:00
f49cae5116 Update Langchain知识库.py 2023-06-01 00:09:07 +08:00
2b620ccf2e Update prompts 2023-06-01 00:07:19 +08:00
a1b7a4da56 Update test cases 2023-06-01 00:03:27 +08:00
61b0e49fed fix some bugs in linux 2023-05-31 23:49:25 +08:00
f60dc371db 12 2023-05-31 10:42:44 +08:00
0a3433b8ac Update README.md 2023-05-31 10:37:08 +08:00
31bce54abb Update README.md 2023-05-31 10:34:21 +08:00
5db1530717 Merge branch 'langchain' of github.com:binary-husky/chatgpt_academic into langchain 2023-05-30 20:08:47 +08:00
c32929fd11 Merge branch 'master' into langchain 2023-05-30 20:08:15 +08:00
3e4c2b056c knowledge base 2023-05-30 19:55:38 +08:00
e79e9d7d23 Merge branch 'master' into langchain 2023-05-30 18:31:39 +08:00
d175b93072 Update README.md.Italian.md 2023-05-30 17:27:41 +08:00
ed254687d2 Update README.md.Italian.md 2023-05-30 17:26:12 +08:00
c0392f7074 Update README.md.Korean.md 2023-05-30 17:25:32 +08:00
f437712af7 Update README.md.Portuguese.md 2023-05-30 17:22:46 +08:00
6d1ea643e9 langchain 2023-05-30 12:54:42 +08:00
9e84cfcd46 Update README.md 2023-05-29 19:48:34 +08:00
897695d29f Fix file blocking under secondary paths 2023-05-28 20:25:35 +08:00
1dcc2873d2 Fix Gradio configuration leak 2023-05-28 20:23:47 +08:00
42cf738a31 Fix the copy button failing in some cases 2023-05-28 18:12:48 +08:00
e4646789af Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-05-28 16:07:29 +08:00
e6c3aabd45 docker-compose check 2023-05-28 16:07:24 +08:00
6789d1fab4 Update README.md 2023-05-28 11:21:50 +08:00
7a733f00a2 Update README.md 2023-05-28 00:19:23 +08:00
dd55888f0e Update README.md 2023-05-28 00:16:45 +08:00
0327df22eb Update README.md 2023-05-28 00:14:54 +08:00
e544f5e9d0 Update README.md 2023-05-27 23:45:15 +08:00
0fad4f44a4 fix dockerfile 2023-05-27 23:36:42 +08:00
1240dd6f26 local gradio 2023-05-27 23:29:22 +08:00
d6be947177 Fix gradio dependency installation issue 2023-05-27 23:10:44 +08:00
3cfbdce9f2 remove limitation for now 2023-05-27 22:25:50 +08:00
1ee471ff57 fix reminder 2023-05-27 22:20:46 +08:00
25ccecf8e3 Update README.md 2023-05-27 21:56:43 +08:00
9e991bfa3e Update requirements.txt 2023-05-27 21:56:16 +08:00
221efd0193 Update README.md 2023-05-27 21:11:25 +08:00
976b9bf65f Update README.md 2023-05-27 21:04:52 +08:00
ae5783e383 Fix gradio copy button bug 2023-05-27 20:20:45 +08:00
30224af042 Merge pull request #798 from Bit0r/master
🐛 Regex for matching LaTeX comments
2023-05-27 14:03:07 +08:00
8ff7c15cd8 🐛 Regex for matching LaTeX comments 2023-05-27 11:19:48 +08:00
f3205994ea Add support for the Azure OpenAI API 2023-05-26 23:22:12 +08:00
ec8cc48a4d Add ProxyNetworkActivate 2023-05-25 23:48:18 +08:00
5d75c578b9 fix dependency 2023-05-25 15:28:27 +08:00
cd411c2eea newbing-free deps 2023-05-25 15:12:54 +08:00
11 changed files with 38 additions and 72 deletions

View File

@@ -1,15 +1,3 @@
---
title: ChatImprovement
emoji: 😻
colorFrom: blue
colorTo: blue
sdk: gradio
sdk_version: 3.32.0
app_file: app.py
pinned: false
---
# ChatGPT 学术优化
> **Note**
>
> 2023.5.27: The Gradio dependency has been adjusted (forked, with several bugs in the official Gradio fixed). Please **update the code** promptly and reinstall the pip dependencies. When installing dependencies, strictly use the versions **specified** in `requirements.txt`.

View File

@@ -45,9 +45,10 @@ WEB_PORT = -1
# Retry limit when OpenAI does not respond (network lag, proxy failure, expired key)
MAX_RETRY = 2
# OpenAI model selection (gpt-4 is currently only open to approved applicants)
LLM_MODEL = "gpt-3.5-turbo" # optional: "chatglm"
AVAIL_LLM_MODELS = ["newbing-free", "gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]
# Model selection (note: LLM_MODEL is the model selected by default, and it must be included in the AVAIL_LLM_MODELS switch list)
LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt35", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "newbing-free", "stack-claude"]
# P.S. other available models also include ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
# How local LLM models such as ChatGLM execute (CPU/GPU)
LOCAL_MODEL_DEVICE = "cpu" # optional: "cuda"
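The comment above states a constraint worth making explicit: the default LLM_MODEL must itself appear in AVAIL_LLM_MODELS. A minimal sanity check, shown as a hypothetical snippet rather than code from the repo:

# Hypothetical check of the constraint spelled out in the comment above:
# the default model must be one of the switchable models.
from config import LLM_MODEL, AVAIL_LLM_MODELS
assert LLM_MODEL in AVAIL_LLM_MODELS, \
    "LLM_MODEL must be included in the AVAIL_LLM_MODELS switch list"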

View File

@@ -27,24 +27,6 @@ def set_forbidden_text(text, mask, pattern, flags=0):
mask[res.span()[0]:res.span()[1]] = PRESERVE
return text, mask
def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
"""
Move the area out of the preserve area (make the text editable for GPT).
Count the braces so as to capture the complete text area.
e.g.
\begin{abstract} blablablablablabla. \end{abstract}
"""
if isinstance(pattern, list): pattern = '|'.join(pattern)
pattern_compile = re.compile(pattern, flags)
for res in pattern_compile.finditer(text):
if not forbid_wrapper:
mask[res.span()[0]:res.span()[1]] = TRANSFORM
else:
mask[res.regs[0][0]: res.regs[1][0]] = PRESERVE # '\\begin{abstract}'
mask[res.regs[1][0]: res.regs[1][1]] = TRANSFORM # abstract
mask[res.regs[1][1]: res.regs[0][1]] = PRESERVE # '\\end{abstract}'
return text, mask
def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
"""
Add a preserve text area in this paper (the text becomes untouchable for GPT).
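For readers following the removed reverse_forbidden_text above, a minimal standalone illustration of the regs indexing (the sample text and pattern are taken from its docstring; the PRESERVE/TRANSFORM mask bookkeeping is omitted):

import re

# regs[0] spans the whole match including the \begin/\end wrappers,
# which the removed function keeps PRESERVE; regs[1] spans only the
# body between them, which it flips to TRANSFORM (editable for GPT).
text = r"\begin{abstract} blablablablablabla. \end{abstract}"
m = re.search(r"\\begin\{abstract\}(.*?)\\end\{abstract\}", text, re.DOTALL)
print(m.regs[0])  # span of the full match, wrappers included
print(m.regs[1])  # span of the abstract body only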
@@ -344,7 +326,6 @@ def split_subprocess(txt, project_folder, return_dict, opts):
# the reverse operations must come last
text, mask = reverse_forbidden_text_careful_brace(text, mask, r"\\caption\{(.*?)\}", re.DOTALL, forbid_wrapper=True)
text, mask = reverse_forbidden_text_careful_brace(text, mask, r"\\abstract\{(.*?)\}", re.DOTALL, forbid_wrapper=True)
text, mask = reverse_forbidden_text(text, mask, r"\\begin\{abstract\}(.*?)\\end\{abstract\}", re.DOTALL, forbid_wrapper=True)
root = convert_to_linklist(text, mask)
# fix braces
@@ -691,9 +672,10 @@ def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work
print("Fatal error occurred, but we cannot identify error, please download zip, read latex log, and compile manually.")
return False, -1, [-1]
def compile_latex_with_timeout(command, cwd, timeout=60):
def compile_latex_with_timeout(command, timeout=60):
import subprocess
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
stdout, stderr = process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
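The hunk above cuts off inside the TimeoutExpired handler. A self-contained sketch of the whole wrapper, assuming the handler kills the hung process and signals failure (the lines past the truncation are an assumption, not a quote of the patch):

import subprocess

def compile_latex_with_timeout(command, timeout=60):
    # Run a LaTeX toolchain command; if it hangs for more than
    # `timeout` seconds, kill it and report failure.
    process = subprocess.Popen(command, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        stdout, stderr = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        process.kill()
        stdout, stderr = process.communicate()  # reap the killed process
        return False
    return True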
@@ -717,24 +699,24 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
# https://stackoverflow.com/questions/738755/dont-make-me-manually-abort-a-latex-compile-when-theres-an-error
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译原始PDF ...', chatbot, history) # refresh the Gradio frontend
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
os.chdir(work_folder_original); ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex'); os.chdir(current_dir)
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # refresh the Gradio frontend
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
os.chdir(work_folder_modified); ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex'); os.chdir(current_dir)
if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')):
# Only if the second step succeeds can the steps below continue
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # refresh the Gradio frontend
if not os.path.exists(pj(work_folder_original, f'{main_file_original}.bbl')):
ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux', work_folder_original)
os.chdir(work_folder_original); ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux'); os.chdir(current_dir)
if not os.path.exists(pj(work_folder_modified, f'{main_file_modified}.bbl')):
ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux', work_folder_modified)
os.chdir(work_folder_modified); ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux'); os.chdir(current_dir)
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # refresh the Gradio frontend
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
os.chdir(work_folder_original); ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex'); os.chdir(current_dir)
os.chdir(work_folder_modified); ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex'); os.chdir(current_dir)
os.chdir(work_folder_original); ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex'); os.chdir(current_dir)
os.chdir(work_folder_modified); ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex'); os.chdir(current_dir)
if mode!='translate_zh':
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # refresh the Gradio frontend
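Every call site above switches into the work folder before compiling because pdflatex and bibtex resolve .aux, .bbl, and included files against the current working directory. An exception-safe variant of the same chdir-compile-chdir-back pattern, shown as a hypothetical helper rather than code from the patch:

import os
from contextlib import contextmanager

@contextmanager
def working_directory(path):
    # Temporarily enter `path`; restore the previous directory even if
    # the compile step raises, unlike a bare os.chdir pair.
    prev = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(prev)

# Hypothetical usage mirroring the calls above:
# with working_directory(work_folder_original):
#     ok = compile_latex_with_timeout(
#         f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex')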
@@ -742,11 +724,13 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # refresh the Gradio frontend
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
os.chdir(work_folder); ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex'); os.chdir(current_dir)
os.chdir(work_folder); ok = compile_latex_with_timeout(f'bibtex merge_diff.aux'); os.chdir(current_dir)
os.chdir(work_folder); ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex'); os.chdir(current_dir)
os.chdir(work_folder); ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex'); os.chdir(current_dir)
# <--------------------->
os.chdir(current_dir)
# <---------- check the results ----------->
results_ = ""
@@ -782,6 +766,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
yield from update_ui_lastest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # refresh the Gradio frontend
if not can_retry: break
os.chdir(current_dir)
return False # failed

View File

@@ -13,11 +13,11 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
web_port: the port on which the software is currently running
"""
history = [] # clear the history to avoid input overflow
chatbot.append((txt, "正在同时咨询gpt-3.5和gpt-4……"))
chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # since the gpt request takes a while, do a timely UI update first
# llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # any number of llm interfaces are supported, separated by the & symbol
llm_kwargs['llm_model'] = 'gpt-3.5-turbo&gpt-4' # any number of llm interfaces are supported, separated by the & symbol
llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo' # any number of llm interfaces are supported, separated by the & symbol
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=txt, inputs_show_user=txt,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,

View File

@@ -104,7 +104,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
meta_paper_info_list = meta_paper_info_list[batchsize:]
chatbot.append(["状态?",
"已经全部完成您可以试试让AI写一个Related Works例如您可以继续输入Write an academic \"Related Works\" section about \"你搜索的研究领域\" for me."])
"已经全部完成您可以试试让AI写一个Related Works例如您可以继续输入Write a \"Related Works\" section about \"你搜索的研究领域\" for me."])
msg = '正常'
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
res = write_results_to_file(history)

Binary file not shown.

View File

@@ -1,10 +1,8 @@
import os; os.environ['no_proxy'] = '*' # avoid unexpected pollution from the proxy network
def main():
import subprocess, sys
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'gradio-stable-fork'])
import gradio as gr
if gr.__version__ not in ['3.28.3','3.32.3']: assert False, "用 pip install -r requirements.txt 安装依赖"
if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖详情信息见requirements.txt"
from request_llm.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
# We recommend copying a config_private.py to hold your secrets, such as API keys and proxy URLs, to avoid accidentally pushing them to github where others could see them
@@ -56,7 +54,6 @@ def main():
cancel_handles = []
with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
gr.HTML(title_html)
gr.HTML('''<center><a href="https://huggingface.co/spaces/qingxu98/gpt-academic?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>请您打开此页面后务必点击上方的“复制空间”Duplicate Space按钮<font color="#FF00FF">使用时先在输入框填入API-KEY然后回车。</font><br/>切忌在“复制空间”Duplicate Space之前填入API_KEY或进行提问否则您的API_KEY将极可能被空间所有者攫取<br/>支持任意数量的OpenAI的密钥和API2D的密钥共存例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。</center>''')
cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
with gr_L1():
with gr_L2(scale=2):
@@ -66,7 +63,7 @@
with gr_L2(scale=1):
with gr.Accordion("输入区", open=True) as area_input_primary:
with gr.Row():
txt = gr.Textbox(show_label=False, lines=2, placeholder="输入问题或API密钥输入多个密钥时用英文逗号间隔。支持OpenAI密钥和API2D密钥共存。").style(container=False)
txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
with gr.Row():
submitBtn = gr.Button("提交", variant="primary")
with gr.Row():
@@ -200,7 +197,10 @@
threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
auto_opentab_delay()
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png", blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
server_name="0.0.0.0", server_port=PORT,
favicon_path="docs/logo.png", auth=AUTHENTICATION,
blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
# If you need to run under a secondary path
# CUSTOM_PATH, = get_conf('CUSTOM_PATH')

View File

@@ -152,7 +152,7 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
# chatglm aligned directly to chatglm2
# chatglm
"chatglm": {
"fn_with_ui": chatglm_ui,
"fn_without_ui": chatglm_noui,
@@ -161,15 +161,6 @@ model_info = {
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"chatglm2": {
"fn_with_ui": chatglm_ui,
"fn_without_ui": chatglm_noui,
"endpoint": None,
"max_token": 1024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
# newbing
"newbing": {
"fn_with_ui": newbing_ui,

View File

@@ -40,12 +40,12 @@ class GetGLMHandle(Process):
while True:
try:
if self.chatglm_model is None:
self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
device, = get_conf('LOCAL_MODEL_DEVICE')
if device=='cpu':
self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).float()
self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
else:
self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
self.chatglm_model = self.chatglm_model.eval()
break
else:

View File

@@ -1,3 +1,4 @@
./docs/gradio-3.32.2-py3-none-any.whl
tiktoken>=0.3.3
requests[socks]
transformers
@@ -14,4 +15,4 @@ pymupdf
openai
numpy
arxiv
rich
rich

View File

@@ -842,4 +842,4 @@ def objload(file='objdump.tmp'):
return
with open(file, 'rb') as f:
return pickle.load(f)