Compare commits

...

229 Commits

Author SHA1 Message Date
b585ae6835 add fine tune model 2023-07-10 03:00:49 +08:00
3cda37a6a6 trans code 2023-07-10 02:12:00 +08:00
127b1e5646 delete tests 2023-07-10 01:41:00 +08:00
ee23e8a851 experimental 2023-07-10 01:03:34 +08:00
acddb86f3a 小而美 2023-07-10 00:20:14 +08:00
4fde0120ab 完善提醒 2023-07-10 00:08:59 +08:00
592a354eef 完善插件提示 2023-07-10 00:06:48 +08:00
bd66cf3d8b 修复对话历史的问题 2023-07-10 00:02:22 +08:00
e6e5174734 改名 2023-07-09 23:47:10 +08:00
13ade82677 改善语音辅助 2023-07-09 23:18:06 +08:00
ce9eb8d20a UP 2023-07-09 21:18:04 +08:00
dd47c0a284 merge changes 2023-07-09 20:55:37 +08:00
f725ab1b31 Merge branch 'master' into improve_ui_master 2023-07-09 20:47:53 +08:00
7ce4192c52 add comments 2023-07-09 17:25:50 +08:00
c06aafb642 Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-07-09 16:01:15 +08:00
b298c5416c 完善PDF总结插件 2023-07-09 16:01:08 +08:00
94abf302cb 修正模板注释 2023-07-09 12:50:51 +08:00
fcc5534e66 ChatGLM 黑盒微调插件 2023-07-09 03:37:47 +08:00
56c0e4d575 3.44说明 2023-07-09 01:21:18 +08:00
8a10db618e Merge branch 'master-interact' 2023-07-09 01:05:04 +08:00
1fe66f0291 优化azure的体验 2023-07-09 00:20:58 +08:00
ced977c443 修复双dollar公式匹配bug 2023-07-08 22:23:29 +08:00
6c2ffbae52 Update README.md 2023-07-08 19:17:35 +08:00
be2f54fac9 Update README.md 2023-07-08 18:21:20 +08:00
87b5e56378 Update requirements.txt 2023-07-08 18:10:33 +08:00
3a5764ed34 Update requirements.txt 2023-07-08 17:59:27 +08:00
91aee50ea7 Chuanhu 主题 2023-07-07 20:12:06 +08:00
e5ccedf491 名称修订 2023-07-07 20:08:26 +08:00
f620666a58 Merge branch 'improve_ui_master' of https://github.com/binary-husky/chatgpt_academic into improve_ui_master 2023-07-07 19:51:48 +08:00
594c63e5d6 主题修正 2023-07-07 19:51:09 +08:00
67d9051890 update error message 2023-07-07 17:41:43 +08:00
be96232127 Merge pull request #933 from binary-husky/master-latex-patch
Latex File Name Bug Patch
2023-07-07 16:57:58 +08:00
3b5bc7a784 Update use_azure.md 2023-07-07 10:55:22 +08:00
5e92f437a1 Update use_azure.md 2023-07-07 10:54:21 +08:00
eabd9d312f 3.43 2023-07-07 10:47:30 +08:00
0da6fe78ac 统一azure-gpt-3.5的格式 2023-07-07 10:45:11 +08:00
be990380a0 Merge branch 'master' of https://github.com/binary-husky/chatgpt_academic into master 2023-07-07 10:42:41 +08:00
9c0bc48420 修复Azure OpenAI接口的各种bug 2023-07-07 10:42:38 +08:00
5c0d34793e Latex File Name Bug Patch 2023-07-07 00:09:50 +08:00
37fc550652 Update config.py 2023-07-06 10:47:06 +08:00
2c1d6ac212 修复Organization的bug 2023-07-05 21:14:13 +08:00
8c699c1b26 Update README.md 2023-07-05 21:04:28 +08:00
c620fa9011 Update README.md 2023-07-05 20:55:59 +08:00
f16fd60211 Update README.md 2023-07-05 20:34:22 +08:00
9674e59d26 更新说明 2023-07-05 20:22:57 +08:00
643c5e125a 更新提醒 2023-07-05 20:10:18 +08:00
e5099e1daa 极少数情况下,openai的官方KEY需要伴随组织编码 2023-07-05 20:05:20 +08:00
3e621bbec1 Update Dockerfile 2023-07-05 14:37:54 +08:00
bb1d5a61c0 update translation matrix 2023-07-05 14:32:33 +08:00
fd3d0be2d8 Update config.py 2023-07-05 14:13:04 +08:00
ae623258f3 更详细的配置提示 2023-07-05 14:10:06 +08:00
cda281f08b 把newbing的cookie加回来 2023-07-05 13:48:50 +08:00
9f8e7a6efa 显示更详细的报错 2023-07-05 13:35:11 +08:00
57643dd2b6 update error msg 2023-07-05 13:01:06 +08:00
6bc8a78cfe No more cookie for NewBing! 2023-07-05 12:45:10 +08:00
d2700e97fb 更新openai失效提醒 2023-07-05 11:03:11 +08:00
c4dd81dc9a Update Dockerfile 2023-07-04 12:28:52 +08:00
e9b06d7cde Merge pull request #927 from QuantumRoseinAmethystVase/master
Update 批量总结PDF文档.py
2023-07-04 12:24:17 +08:00
6e6ea69611 Unsplash恢复了 2023-07-04 12:16:01 +08:00
b082b5eb1b 将阿里云TOKEN移动到config中 2023-07-03 23:20:25 +08:00
9648d78453 重构异步代码,增强可读性 2023-07-03 22:44:10 +08:00
16c17eb077 Update 批量总结PDF文档.py
Improve the output.
2023-07-03 18:55:16 +08:00
2dc8718041 语音模组第一个版本 2023-07-03 00:13:10 +08:00
a330d6636e error 2023-07-02 22:54:05 +08:00
322c4be145 同步音频输入 2023-07-02 14:42:12 +08:00
a3596ff60d audio 2023-07-02 01:05:20 +08:00
e11d8132f8 add green theme 2023-07-01 23:02:44 +08:00
59877dd728 Local variable 'result' might be referenced before assignment, add else result 2023-07-01 22:27:11 +08:00
5f7ffef238 增加基础功能判空 2023-07-01 22:04:42 +08:00
41c10f5688 report image generation error in UI 2023-07-01 02:28:32 +08:00
d7ac99f603 更正错误提示 2023-07-01 01:46:43 +08:00
1616daae6a Merge branch 'master' of https://github.com/binary-husky/chatgpt_academic into master 2023-07-01 00:17:30 +08:00
a1092d8f92 提供自动清空输入框的选项 2023-07-01 00:17:26 +08:00
34ca9f138f Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-06-30 14:56:28 +08:00
df3f1aa3ca 更正ChatGLM2的默认Token数量 2023-06-30 14:56:22 +08:00
bf805cf477 Merge branch 'master' of https://github.com/binary-husky/chatgpt_academic into master 2023-06-30 13:09:51 +08:00
ecb08e69be remove find picture core functionality 2023-06-30 13:08:54 +08:00
28c1e3f11b Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-06-30 12:06:33 +08:00
403667aec1 upgrade chatglm to chatglm2 2023-06-30 12:06:28 +08:00
22f377e2fb fix multi user cwd shift 2023-06-30 11:05:47 +08:00
37172906ef 修复文件导出的bug 2023-06-29 14:55:55 +08:00
3b78e0538b 修复插件demo的图像显示的问题 2023-06-29 14:52:58 +08:00
d8f9ac71d0 Merge pull request #907 from Xminry/master
feat:联网搜索功能,cn.bing.com版,国内可用
2023-06-29 12:44:32 +08:00
aced272d3c 微调插件提示 2023-06-29 12:43:50 +08:00
aff77a086d Merge branch 'master' of https://github.com/Xminry/gpt_academic into Xminry-master 2023-06-29 12:38:43 +08:00
49253c4dc6 [arxiv trans] add html comparison to zip file 2023-06-29 12:29:49 +08:00
1a00093015 修复提示 2023-06-29 12:15:52 +08:00
64f76e7401 3.42 2023-06-29 11:32:19 +08:00
eb4c07997e 修复Latex矫错和本地Latex论文翻译的问题 2023-06-29 11:30:42 +08:00
99cf7205c3 feat:联网搜索功能,cn.bing.com版,国内可用 2023-06-28 10:30:08 +08:00
d684b4cdb3 Merge pull request #905 from Xminry/master
Update 理解PDF文档内容.py
2023-06-27 23:37:25 +08:00
601a95c948 Merge pull request #881 from OverKit/master
update latex_utils.py
2023-06-27 19:20:17 +08:00
e18bef2e9c add item breaker 2023-06-27 19:16:05 +08:00
f654c1af31 merge regex expressions 2023-06-27 18:59:56 +08:00
e90048a671 Merge branch 'master' of https://github.com/OverKit/gpt_academic into OverKit-master 2023-06-27 16:14:12 +08:00
ea624b1510 Merge pull request #889 from dackdawn/master
添加0613模型的声明
2023-06-27 15:03:15 +08:00
057e3dda3c Merge branch 'master' of https://github.com/dackdawn/gpt_academic into dackdawn-master 2023-06-27 15:02:22 +08:00
4290821a50 Update 理解PDF文档内容.py 2023-06-27 01:57:31 +08:00
280e14d7b7 更新Latex模块的docker-compose 2023-06-26 09:59:14 +08:00
9f0cf9fb2b arxiv PDF 引用 2023-06-25 23:30:31 +08:00
b8560b7510 修正误判latex模板文件的bug 2023-06-25 22:46:16 +08:00
d841d13b04 add arxiv translation test samples 2023-06-25 22:12:44 +08:00
efda9e5193 Merge pull request #897 from Ranhuiryan/master
添加azure-gpt35选项
2023-06-24 17:59:51 +10:00
33d2e75aac add azure-gpt35 to model list 2023-06-21 16:19:49 +08:00
74941170aa update azure use instruction 2023-06-21 16:19:26 +08:00
cd38949903 当遇到错误时,回滚到原文 2023-06-21 11:53:57 +10:00
d87f1eb171 更新接入azure的说明 2023-06-21 11:38:59 +10:00
cd1e4e1ba7 Merge pull request #797 from XiaojianTang/master
增加azure openai api的支持
2023-06-21 11:23:41 +10:00
cf5f348d70 update test samples 2023-06-21 11:20:31 +10:00
0ee25f475e Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-06-20 23:07:51 +08:00
1fede6df7f temp 2023-06-20 23:05:17 +08:00
22a65cd163 Create build-with-latex.yml 2023-06-21 00:55:24 +10:00
538b041ea3 Merge pull request #890 from Mcskiller/master
Update README.md
2023-06-21 00:53:26 +10:00
d7b056576d add latex docker-compose 2023-06-21 00:52:58 +10:00
cb0bb6ab4a fix minor bugs 2023-06-21 00:41:33 +10:00
bf955aaf12 fix bugs 2023-06-20 23:12:30 +10:00
61eb0da861 fix encoding bug 2023-06-20 22:08:09 +10:00
5da633d94d Update README.md
Fix the error URL for the git clone.
2023-06-20 19:10:11 +08:00
f3e4e26e2f 添加0613模型的声明
openai对gpt-3.5-turbo的RPM限制是3,而gpt-3.5-turbo-0613的RPM是60,虽然两个模型的内容是一致的,但是选定特定模型可以获得更高的RPM和TPM
2023-06-19 21:40:26 +08:00
af7734dd35 avoid file fusion 2023-06-19 16:57:11 +10:00
d5bab093f9 rename function names 2023-06-19 15:17:33 +10:00
f94b167dc2 Merge branch 'master' into overkit-master 2023-06-19 14:53:51 +10:00
951d5ec758 Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-06-19 14:52:25 +10:00
016d8ee156 Merge remote-tracking branch 'origin/master' into OverKit-master 2023-06-19 14:51:59 +10:00
dca9ec4bae Merge branch 'master' of https://github.com/OverKit/gpt_academic into OverKit-master 2023-06-19 14:49:50 +10:00
a06e43c96b Update README.md 2023-06-18 16:15:37 +08:00
29c6bfb6cb Update README.md 2023-06-18 16:12:06 +08:00
8d7ee975a0 Update README.md 2023-06-18 16:10:45 +08:00
4bafbb3562 Update Latex输出PDF结果.py 2023-06-18 15:54:23 +08:00
7fdf0a8e51 调整区分内容的代码 2023-06-18 15:51:29 +08:00
2bb13b4677 Update README.md 2023-06-18 15:44:42 +08:00
9a5a509dd9 修复关于abstract的搜索 2023-06-17 19:27:21 +08:00
cbcb98ef6a Merge pull request #872 from Skyzayre/master
Update README.md
2023-06-16 17:54:39 +08:00
bb864c6313 增加一些提示文字 2023-06-16 17:33:19 +08:00
6d849eeb12 修复Langchain插件的bug 2023-06-16 17:33:03 +08:00
ef752838b0 Update README.md 2023-06-15 02:07:43 +08:00
73d4a1ff4b Update README.md 2023-06-14 10:15:47 +08:00
8c62f21aa6 3.41增加gpt-3.5-16k的支持 2023-06-14 09:57:09 +08:00
c40ebfc21f 将gpt-3.5-16k作为加入支持列表 2023-06-14 09:50:15 +08:00
c365ea9f57 Update README.md 2023-06-13 16:13:19 +08:00
12d66777cc Merge pull request #864 from OverKit/master
check letter % after removing spaces or tabs in the left
2023-06-12 15:21:35 +08:00
9ac3d0d65d check letter % after removing spaces or tabs in the left 2023-06-12 10:09:52 +08:00
9fd212652e 专业词汇声明 2023-06-12 09:45:59 +08:00
790a1cf12a 添加一些提示 2023-06-11 20:12:25 +08:00
3ecf2977a8 修复caption翻译 2023-06-11 18:23:54 +08:00
aeddf6b461 Update Latex输出PDF结果.py 2023-06-11 10:20:49 +08:00
ce0d8b9dab 虚空终端插件雏形 2023-06-11 01:36:23 +08:00
3c00e7a143 file link in chatbot 2023-06-10 21:45:38 +08:00
ef1bfdd60f update pip install notice 2023-06-08 21:29:10 +08:00
e48d92e82e update translation 2023-06-08 18:34:06 +08:00
110510997f Update README.md 2023-06-08 12:48:52 +08:00
b52695845e Update README.md 2023-06-08 12:44:05 +08:00
f30c9c6d3b Update README.md 2023-06-08 12:43:13 +08:00
ff5403eac6 Update README.md 2023-06-08 12:42:24 +08:00
f9226d92be Update version 2023-06-08 12:24:14 +08:00
a0ea5d0e9e Update README.md 2023-06-08 12:22:03 +08:00
ce6f11d200 Update README.md 2023-06-08 12:20:49 +08:00
10b3001dba Update README.md 2023-06-08 12:19:11 +08:00
e2de1d76ea Update README.md 2023-06-08 12:18:31 +08:00
77cc141a82 Update README.md 2023-06-08 12:14:02 +08:00
526b4d8ecd Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2023-06-07 11:09:20 +08:00
149db621ec langchain check depends 2023-06-07 11:09:12 +08:00
2e1bb7311c Merge pull request #848 from MengDanzz/master
将Dockerfile COPY分成两段,缓存依赖库,重新构建不需要重新安装
2023-06-07 10:44:09 +08:00
dae65fd2c2 在copy ..后在运行一次pip install检查依赖变化 2023-06-07 10:43:45 +08:00
9aafb2ee47 非pypi包加入COPY 2023-06-07 09:18:57 +08:00
6bc91bd02e Merge branch 'binary-husky:master' into master 2023-06-07 09:15:44 +08:00
8ef7344101 fix subprocess bug in Windows 2023-06-06 18:57:52 +08:00
40da1b0afe 将Latex分解程序放到子进程执行 2023-06-06 18:44:00 +08:00
c65def90f3 将Dockerfile COPY分成两段,缓存依赖库,重新构建不需要重新安装 2023-06-06 14:36:30 +08:00
ddeaf76422 check latex in PATH 2023-06-06 00:23:00 +08:00
f23b66dec2 update Dockerfile with Latex 2023-06-05 23:49:54 +08:00
a26b294817 Write Some Docstring 2023-06-05 23:44:59 +08:00
66018840da declare resp 2023-06-05 23:24:41 +08:00
cea2144f34 fix test samples 2023-06-05 23:11:21 +08:00
7f5be93c1d 修正一些正则匹配bug 2023-06-05 22:57:39 +08:00
85b838b302 add Linux support 2023-06-04 23:06:35 +08:00
27f97ba92a remove previous results 2023-06-04 16:55:36 +08:00
14269eba98 建立本地arxiv缓存区 2023-06-04 16:08:01 +08:00
d5c9bc9f0a 提高iffalse搜索优先级 2023-06-04 14:15:59 +08:00
b0fed3edfc consider iffalse state 2023-06-04 14:06:02 +08:00
7296d054a2 patch latex segmentation 2023-06-04 13:56:15 +08:00
d57c7d352d improve quality 2023-06-03 23:54:30 +08:00
3fd2927ea3 改善 2023-06-03 23:33:45 +08:00
b745074160 avoid most compile failure 2023-06-03 23:33:32 +08:00
70ee810133 improve success rate 2023-06-03 19:39:19 +08:00
68fea9e79b fix test 2023-06-03 18:09:39 +08:00
f82bf91aa8 test example 2023-06-03 18:06:39 +08:00
dde9edcc0c fix a fatal mistake 2023-06-03 17:49:22 +08:00
66c78e459e 修正提示 2023-06-03 17:18:38 +08:00
de54102303 修改提醒 2023-06-03 16:43:26 +08:00
7c7d2d8a84 Latex的minipage补丁 2023-06-03 16:16:32 +08:00
834f989ed4 考虑有人用input不加.tex的情况 2023-06-03 15:42:22 +08:00
b658ee6e04 修复arxiv翻译的一些问题 2023-06-03 15:36:55 +08:00
1a60280ea0 添加警告 2023-06-03 14:40:37 +08:00
991cb7d272 warning 2023-06-03 14:39:40 +08:00
463991cfb2 fix bug 2023-06-03 14:24:06 +08:00
06f10b5fdc fix zh cite bug 2023-06-03 14:17:58 +08:00
d275d012c6 Merge branch 'langchain' into master 2023-06-03 13:53:39 +08:00
c5d1ea3e21 update langchain version 2023-06-03 13:53:34 +08:00
0022b92404 update prompt 2023-06-03 13:50:39 +08:00
ef61221241 latex auto translation milestone 2023-06-03 13:46:40 +08:00
5a1831db98 成功! 2023-06-03 00:34:23 +08:00
a643f8b0db debug translation 2023-06-02 23:06:01 +08:00
601712fd0a latex toolchain 2023-06-02 21:44:11 +08:00
e769f831c7 latex 2023-06-02 14:07:04 +08:00
dcd952671f Update main.py 2023-06-01 15:56:52 +08:00
06564df038 Merge branch 'langchain' 2023-06-01 09:39:34 +08:00
2f037f30d5 暂时移除插件锁定 2023-06-01 09:39:00 +08:00
efedab186d Merge branch 'master' into langchain 2023-06-01 00:10:22 +08:00
f49cae5116 Update Langchain知识库.py 2023-06-01 00:09:07 +08:00
2b620ccf2e 更新提示 2023-06-01 00:07:19 +08:00
a1b7a4da56 更新测试案例 2023-06-01 00:03:27 +08:00
61b0e49fed fix some bugs in linux 2023-05-31 23:49:25 +08:00
f60dc371db 12 2023-05-31 10:42:44 +08:00
0a3433b8ac Update README.md 2023-05-31 10:37:08 +08:00
31bce54abb Update README.md 2023-05-31 10:34:21 +08:00
5db1530717 Merge branch 'langchain' of github.com:binary-husky/chatgpt_academic into langchain 2023-05-30 20:08:47 +08:00
c32929fd11 Merge branch 'master' into langchain 2023-05-30 20:08:15 +08:00
3e4c2b056c knowledge base 2023-05-30 19:55:38 +08:00
e79e9d7d23 Merge branch 'master' into langchain 2023-05-30 18:31:39 +08:00
d175b93072 Update README.md.Italian.md 2023-05-30 17:27:41 +08:00
ed254687d2 Update README.md.Italian.md 2023-05-30 17:26:12 +08:00
c0392f7074 Update README.md.Korean.md 2023-05-30 17:25:32 +08:00
f437712af7 Update README.md.Portuguese.md 2023-05-30 17:22:46 +08:00
6d1ea643e9 langchain 2023-05-30 12:54:42 +08:00
9e84cfcd46 Update README.md 2023-05-29 19:48:34 +08:00
897695d29f 修复二级路径的文件屏蔽 2023-05-28 20:25:35 +08:00
1dcc2873d2 修复Gradio配置泄露的问题 2023-05-28 20:23:47 +08:00
f3205994ea 增加azure openai api的支持 2023-05-26 23:22:12 +08:00
90 changed files with 17170 additions and 13393 deletions

.github/workflows/build-with-latex.yml (new file, 44 lines)

@@ -0,0 +1,44 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: Create and publish a Docker image for Latex support
on:
push:
branches:
- 'master'
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}_with_latex
jobs:
build-and-push-image:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Log in to the Container registry
uses: docker/login-action@v2
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v4
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
- name: Build and push Docker image
uses: docker/build-push-action@v4
with:
context: .
push: true
file: docs/GithubAction+NoLocal+Latex
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

.gitignore (1 line changed)

@@ -150,3 +150,4 @@ request_llm/jittorllms
multi-language
request_llm/moss
media
flagged

Dockerfile

@@ -1,24 +1,34 @@
# 此Dockerfile适用于“无本地模型”的环境构建如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM
# 如何构建: 先修改 `config.py` 然后 docker build -t gpt-academic .
# 如何运行: docker run --rm -it --net=host gpt-academic
# 此Dockerfile适用于“无本地模型”的环境构建如果需要使用chatglm等本地模型或者latex运行依赖请参考 docker-compose.yml
# 如何构建: 先修改 `config.py` 然后 `docker build -t gpt-academic . `
# 如何运行(Linux下): `docker run --rm -it --net=host gpt-academic `
# 如何运行(其他操作系统选择任意一个固定端口50923): `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic `
FROM python:3.11
# 非必要步骤更换pip源
RUN echo '[global]' > /etc/pip.conf && \
echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
# 进入工作路径
WORKDIR /gpt
# 装载项目文件
COPY . .
# 安装依赖
# 安装大部分依赖利用Docker缓存加速以后的构建
COPY requirements.txt ./
COPY ./docs/gradio-3.32.2-py3-none-any.whl ./docs/gradio-3.32.2-py3-none-any.whl
RUN pip3 install -r requirements.txt
# 可选步骤,用于预热模块
# 装载项目文件,安装剩余依赖
COPY . .
RUN pip3 install -r requirements.txt
# 非必要步骤,用于预热模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
# 启动
CMD ["python3", "-u", "main.py"]

README.md (174 lines changed)

@@ -1,24 +1,26 @@
> **Note**
>
> 5月27日对gradio依赖进行了较大的修复和调整fork并解决了官方Gradio的一系列bug。但如果27日当天进行了更新可能会导致代码报错依赖缺失卡在loading界面等请及时更新到**最新版代码**并重新安装pip依赖即可。若给您带来困扰还请谅解。安装依赖时请严格选择requirements.txt中**指定的版本**
> 2023.7.5: Gradio依赖调整。请及时**更新代码**
>
> `pip install -r requirements.txt -i https://pypi.org/simple`
> 2023.7.8: pydantic出现兼容问题已修改 `requirements.txt`。安装依赖时,请严格选择`requirements.txt`中**指定的版本**
>
> `pip install -r requirements.txt`
# <img src="docs/logo.png" width="40" > GPT 学术优化 (GPT Academic)
**如果喜欢这个项目请给它一个Star如果你发明了更好用的快捷键或函数插件欢迎发pull requests**
# <div align=center><img src="docs/logo.png" width="40" > GPT 学术优化 (GPT Academic)</div>
**如果喜欢这个项目请给它一个Star如果您发明了好用的快捷键或函数插件欢迎发pull requests**
If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself.
To translate this project to arbitary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
> **Note**
>
> 1.请注意只有**红颜色**标识的函数插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR
> 1.请注意只有 **高亮(如红色)** 标识的函数插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR
>
> 2.本项目中每个文件的功能都在自译解[`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)详细说明。随着版本的迭代您也可以随时自行点击相关函数插件调用GPT重新生成项目的自我解析报告。常见问题汇总在[`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)当中。[安装方法](#installation)。
> 2.本项目中每个文件的功能都在自译解[`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)详细说明。随着版本的迭代您也可以随时自行点击相关函数插件调用GPT重新生成项目的自我解析报告。常见问题汇总在[`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)当中。[安装方法](#installation)。
>
> 3.本项目兼容并鼓励尝试国产大语言模型chatglm和RWKV, 盘古等等。支持多个api-key共存可在配置文件中填写如`API_KEY="openai-key1,openai-key2,api2d-key3"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。
> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM和Moss等等。支持多个api-key共存可在配置文件中填写如`API_KEY="openai-key1,openai-key2,api2d-key3"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。
@@ -31,23 +33,24 @@ To translate this project to arbitary language with GPT, read and run [`multi_la
一键中英互译 | 一键中英互译
一键代码解释 | 显示代码、解释代码、生成代码、给代码加注释
[自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键
模块化设计 | 支持自定义强大的[函数插件](https://github.com/binary-husky/chatgpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
[自我程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [函数插件] [一键读懂](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)本项目的源代码
模块化设计 | 支持自定义强大的[函数插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
[自我程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [函数插件] [一键读懂](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)本项目的源代码
[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [函数插件] 一键可以剖析其他Python/C/C++/Java/Lua/...项目树
读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [函数插件] 一键解读latex/pdf论文全文并生成摘要
Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [函数插件] 一键翻译或润色latex论文
批量注释生成 | [函数插件] 一键批量生成函数注释
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [函数插件] 看到上面5种语言的[README](https://github.com/binary-husky/chatgpt_academic/blob/master/docs/README_EN.md)了吗?
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [函数插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?
chat分析报告生成 | [函数插件] 运行后自动生成总结汇报
[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [函数插件] PDF论文提取题目&摘要+翻译全文(多线程)
[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [函数插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
[谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [函数插件] 给定任意谷歌学术搜索页面URL让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
互联网信息聚合+GPT | [函数插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck),再回答问题,让信息永不过时
互联网信息聚合+GPT | [函数插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck)回答问题,让信息永不过时
⭐Arxiv论文精细翻译 | [函数插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具
公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮
多线程函数插件支持 | 支持多线调用chatgpt一键处理[海量文本](https://www.bilibili.com/video/BV1FT411H7c5/)或程序
启动暗色gradio[主题](https://github.com/binary-husky/chatgpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持[API2D](https://api2d.com/)接口支持 | 同时被GPT3.5、GPT4、[清华ChatGLM](https://github.com/THUDM/ChatGLM-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧?
更多LLM模型接入支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)[RWKV](https://github.com/BlinkDL/ChatRWKV)和[盘古α](https://openi.org.cn/pangu/)
启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM](https://github.com/THUDM/ChatGLM-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧?
更多LLM模型接入支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/)
更多新功能展示(图像生成等) …… | 见本文档结尾处 ……
</div>
@@ -84,19 +87,18 @@ chat分析报告生成 | [函数插件] 运行后自动生成总结汇报
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>
---
# Installation
## 安装-方法1:直接运行 (Windows, Linux or MacOS)
### 安装方法I:直接运行 (Windows, Linux or MacOS)
1. 下载项目
```sh
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
git clone https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```
2. 配置API_KEY
在`config.py`中配置API KEY等设置[特殊网络环境设置](https://github.com/binary-husky/gpt_academic/issues/1) 。
在`config.py`中配置API KEY等设置[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。
(P.S. 程序运行时会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。因此,如果您能理解我们的配置读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中。`config_private.py`不受git管控可以让您的隐私信息更加安全。P.S.项目同样支持通过`环境变量`配置大多数选项,环境变量的书写格式参考`docker-compose`文件。读取优先级: `环境变量` > `config_private.py` > `config.py`)
@@ -112,6 +114,7 @@ conda activate gptac_venv # 激活anaconda环境
python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤
```
<details><summary>如果需要支持清华ChatGLM/复旦MOSS作为后端请点击展开此处</summary>
<p>
@@ -138,19 +141,13 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-
python main.py
```
5. 测试函数插件
```
- 测试函数插件模板函数要求gpt回答历史上的今天发生了什么您可以根据此函数为模板实现更复杂的功能
点击 "[函数插件模板Demo] 历史上的今天"
```
### 安装方法II使用Docker
## 安装-方法2使用Docker
1. 仅ChatGPT推荐大多数人选择
1. 仅ChatGPT推荐大多数人选择等价于docker-compose方案1
``` sh
git clone https://github.com/binary-husky/chatgpt_academic.git # 下载项目
cd chatgpt_academic # 进入路径
git clone https://github.com/binary-husky/gpt_academic.git # 下载项目
cd gpt_academic # 进入路径
nano config.py # 用任意文本编辑器编辑config.py, 配置 “Proxy” “API_KEY” 以及 “WEB_PORT” (例如50923) 等
docker build -t gpt-academic . # 安装
@@ -159,42 +156,48 @@ docker run --rm -it --net=host gpt-academic
#(最后一步-选择2在macOS/windows环境下只能用-p选项将容器上的端口(例如50923)暴露给主机上的端口
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```
P.S. 如果需要依赖Latex的插件功能请见Wiki。另外您也可以直接使用docker-compose获取Latex功能修改docker-compose.yml保留方案4并删除其他方案
2. ChatGPT + ChatGLM + MOSS需要熟悉Docker
``` sh
# 修改docker-compose.yml删除方案1和方案3保留方案2。修改docker-compose.yml中方案2的配置参考其中注释即可
# 修改docker-compose.yml保留方案2并删除其他方案。修改docker-compose.yml中方案2的配置参考其中注释即可
docker-compose up
```
3. ChatGPT + LLAMA + 盘古 + RWKV需要熟悉Docker
``` sh
# 修改docker-compose.yml删除方案1和方案2保留方案3。修改docker-compose.yml中方案3的配置参考其中注释即可
# 修改docker-compose.yml保留方案3并删除其他方案。修改docker-compose.yml中方案3的配置参考其中注释即可
docker-compose up
```
## 安装-方法3:其他部署姿势
### 安装方法III:其他部署姿势
1. 一键运行脚本。
完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。
脚本的贡献来源是[oobabooga](https://github.com/oobabooga/one-click-installers)。
1. 如何使用反代URL/微软云AzureAPI
2. 使用docker-compose运行。
请阅读docker-compose.yml后按照其中的提示操作即可
3. 如何使用反代URL
按照`config.py`中的说明配置API_URL_REDIRECT即可。
2. 远程云服务器部署(需要云服务器知识与经验)
请访问[部署wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
4. 微软云AzureAPI
按照`config.py`中的说明配置即可AZURE_ENDPOINT等四个配置
3. 使用WSL2Windows Subsystem for Linux 子系统)
请访问[部署wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
5. 远程云服务器部署(需要云服务器知识与经验)。
请访问[部署wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
4. 如何在二级网址(如`http://localhost/subpath`)下运行
6. 使用WSL2Windows Subsystem for Linux 子系统)。
请访问[部署wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
7. 如何在二级网址(如`http://localhost/subpath`)下运行。
请访问[FastAPI运行说明](docs/WithFastapi.md)
5. 使用docker-compose运行
请阅读docker-compose.yml后按照其中的提示操作即可
---
# Advanced Usage
## 自定义新的便捷按钮 / 自定义函数插件
1. 自定义新的便捷按钮(学术快捷键)
# Advanced Usage
### I自定义新的便捷按钮学术快捷键
任意文本编辑器打开`core_functional.py`,添加条目如下,然后重启程序即可。(如果按钮已经添加成功并可见,那么前缀、后缀都支持热修改,无需重启程序即可生效。)
例如
```
@@ -210,50 +213,45 @@ docker-compose up
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>
2. 自定义函数插件
### II自定义函数插件
编写强大的函数插件来执行任何你想得到的和想不到的任务。
本项目的插件编写、调试难度很低只要您具备一定的python基础知识就可以仿照我们提供的模板实现自己的插件功能。
详情请参考[函数插件指南](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。
详情请参考[函数插件指南](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。
---
# Latest Update
## 新功能动态
### I新功能动态
1. 对话保存功能。在函数插件区调用 `保存当前的对话` 即可将当前对话保存为可读+可复原的html文件
另外在函数插件区(下拉菜单)调用 `载入对话历史存档` ,即可还原之前的会话。
Tip不指定文件直接点击 `载入对话历史存档` 可以查看历史html存档缓存,点击 `删除所有本地对话历史记录` 可以删除所有html存档缓存
Tip不指定文件直接点击 `载入对话历史存档` 可以查看历史html存档缓存。
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
</div>
2. 生成报告。大部分插件都会在执行结束后,生成工作报告
2. ⭐Latex/Arxiv论文翻译功能⭐
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/002a1a75-ace0-4e6a-94e2-ec1406a746f1" height="250" > ===>
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/9fdcc391-f823-464f-9322-f8719677043b" height="250" >
</div>
3. 模块化功能设计,简单的接口却能支持强大的功能
3. 生成报告。大部分插件都会在执行结束后,生成工作报告
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="250" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="250" >
</div>
4. 模块化功能设计,简单的接口却能支持强大的功能
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>
4. 这是一个能够“自我译解”的开源项目
5. 译解其他开源项目
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
</div>
5. 译解其他开源项目,不在话下
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
</div>
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" height="250" >
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" height="250" >
</div>
6. 装饰[live2d](https://github.com/fghrsh/live2d_demo)的小功能(默认关闭,需要修改`config.py`
@@ -278,13 +276,15 @@ Tip不指定文件直接点击 `载入对话历史存档` 可以查看历史h
10. Latex全文校对纠错
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" height="200" > ===>
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/476f66d9-7716-4537-b5c1-735372c25adb" height="200">
</div>
## 版本:
### II版本:
- version 3.5(Todo): 使用自然语言调用本项目的所有函数插件(高优先级)
- version 3.4(Todo): 完善chatglm本地大模型的多线支持
- version 3.4: +arxiv论文翻译、latex论文批改功能
- version 3.3: +互联网信息综合功能
- version 3.2: 函数插件支持更多参数接口 (保存对话功能, 解读任意语言代码+同时询问任意的LLM组合)
- version 3.1: 支持同时问询多个gpt模型支持api2d支持多个apikey负载均衡
@@ -302,29 +302,37 @@ gpt_academic开发者QQ群-2610599535
- 已知问题
- 某些浏览器翻译插件干扰此软件前端的运行
- 官方Gradio目前有很多兼容性Bug请务必使用requirement.txt安装Gradio
- 官方Gradio目前有很多兼容性Bug请务必使用`requirement.txt`安装Gradio
## 参考与学习
### III主题
1. `Chuanhu-Small-and-Beautiful` [网址](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
### IV参考与学习
```
代码中参考了很多其他优秀项目中的设计,主要包括
代码中参考了很多其他优秀项目中的设计,顺序不分先后
# 项目1清华ChatGLM-6B:
# 清华ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B
# 项目2清华JittorLLMs:
# 清华JittorLLMs:
https://github.com/Jittor/JittorLLMs
# 项目3Edge-GPT:
https://github.com/acheong08/EdgeGPT
# 项目4ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT
# 项目5ChatPaper:
# ChatPaper:
https://github.com/kaixindelele/ChatPaper
# 更多:
# Edge-GPT:
https://github.com/acheong08/EdgeGPT
# ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT
# Oobabooga one-click installer:
https://github.com/oobabooga/one-click-installers
# More
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```

check_proxy.py

@@ -12,6 +12,8 @@ def check_proxy(proxies):
result = f"代理配置 {proxies_https}, 代理所在地:{country}"
elif 'error' in data:
result = f"代理配置 {proxies_https}, 代理所在地未知IP查询频率受限"
else:
result = f"代理配置 {proxies_https}, 代理数据解析失败:{data}"
print(result)
return result
except:

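The hunk above adds an `else` fallback so that `result` is always assigned before it is printed and returned, which appears to be the fix referenced by commit 59877dd728 in the list above. Below is a minimal, self-contained sketch of that three-way branch; `describe_proxy` and the stand-in `data` dict are invented for illustration and are not part of check_proxy.py.

```python
# Standalone sketch of the branch structure added above. `data` stands in for
# the parsed JSON of the geo-IP lookup inside check_proxy(); the function name
# describe_proxy is hypothetical.
def describe_proxy(proxies_https: str, data: dict) -> str:
    if 'country' in data:
        result = f"代理配置 {proxies_https}, 代理所在地:{data['country']}"
    elif 'error' in data:
        result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限"
    else:
        # The newly added fallback: without it, `result` could be referenced
        # before assignment when the response carries neither key.
        result = f"代理配置 {proxies_https}, 代理数据解析失败:{data}"
    return result

if __name__ == "__main__":
    print(describe_proxy("socks5h://localhost:11284", {}))  # exercises the new else branch
```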
colorful.py

@@ -34,58 +34,28 @@ def print亮紫(*kw,**kargs):
def print亮靛(*kw,**kargs):
print("\033[1;36m",*kw,"\033[0m",**kargs)
def print亮红(*kw,**kargs):
print("\033[1;31m",*kw,"\033[0m",**kargs)
def print亮绿(*kw,**kargs):
print("\033[1;32m",*kw,"\033[0m",**kargs)
def print亮黄(*kw,**kargs):
print("\033[1;33m",*kw,"\033[0m",**kargs)
def print亮蓝(*kw,**kargs):
print("\033[1;34m",*kw,"\033[0m",**kargs)
def print亮紫(*kw,**kargs):
print("\033[1;35m",*kw,"\033[0m",**kargs)
def print亮靛(*kw,**kargs):
print("\033[1;36m",*kw,"\033[0m",**kargs)
print_red = print红
print_green = print绿
print_yellow = print黄
print_blue = print蓝
print_purple = print紫
print_indigo = print靛
print_bold_red = print亮红
print_bold_green = print亮绿
print_bold_yellow = print亮黄
print_bold_blue = print亮蓝
print_bold_purple = print亮紫
print_bold_indigo = print亮靛
if not stdout.isatty():
# redirection, avoid a fucked up log file
print红 = print
print绿 = print
print黄 = print
print蓝 = print
print紫 = print
print靛 = print
print亮红 = print
print亮绿 = print
print亮黄 = print
print亮蓝 = print
print亮紫 = print
print亮靛 = print
print_red = print
print_green = print
print_yellow = print
print_blue = print
print_purple = print
print_indigo = print
print_bold_red = print
print_bold_green = print
print_bold_yellow = print
print_bold_blue = print
print_bold_purple = print
print_bold_indigo = print
# Do you like the elegance of Chinese characters?
def sprint红(*kw):
return "\033[0;31m"+' '.join(kw)+"\033[0m"
def sprint绿(*kw):
return "\033[0;32m"+' '.join(kw)+"\033[0m"
def sprint黄(*kw):
return "\033[0;33m"+' '.join(kw)+"\033[0m"
def sprint蓝(*kw):
return "\033[0;34m"+' '.join(kw)+"\033[0m"
def sprint紫(*kw):
return "\033[0;35m"+' '.join(kw)+"\033[0m"
def sprint靛(*kw):
return "\033[0;36m"+' '.join(kw)+"\033[0m"
def sprint亮红(*kw):
return "\033[1;31m"+' '.join(kw)+"\033[0m"
def sprint亮绿(*kw):
return "\033[1;32m"+' '.join(kw)+"\033[0m"
def sprint亮黄(*kw):
return "\033[1;33m"+' '.join(kw)+"\033[0m"
def sprint亮蓝(*kw):
return "\033[1;34m"+' '.join(kw)+"\033[0m"
def sprint亮紫(*kw):
return "\033[1;35m"+' '.join(kw)+"\033[0m"
def sprint亮靛(*kw):
return "\033[1;36m"+' '.join(kw)+"\033[0m"

config.py (104 lines changed)

@@ -1,16 +1,27 @@
# [step 1]>> 例如: API_KEY = "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" 此key无效
API_KEY = "sk-此处填API密钥" # 可同时填写多个API-KEY用英文逗号分割例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"
"""
以下所有配置也都支持利用环境变量覆写环境变量配置格式见docker-compose.yml。
读取优先级:环境变量 > config_private.py > config.py
--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
All the following configurations also support using environment variables to override,
and the environment variable configuration format can be seen in docker-compose.yml.
Configuration reading priority: environment variable > config_private.py > config.py
"""
# [step 1]>> API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下还需要填写组织格式如org-123456789abcdefghijklmno的请向下翻找 API_ORG 设置项
API_KEY = "此处填API密钥" # 可同时填写多个API-KEY用英文逗号分割例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"
# [step 2]>> 改为True应用代理如果直接在海外服务器部署此处不修改
USE_PROXY = False
if USE_PROXY:
# 填写格式是 [协议]:// [地址] :[端口]填写之前不要忘记把USE_PROXY改成True如果直接在海外服务器部署此处不修改
# 例如 "socks5h://localhost:11284"
# [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http
# [地址] 懂的都懂不懂就填localhost或者127.0.0.1肯定错不了localhost意思是代理软件安装在本机上
# [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
# 代理网络的地址,打开你的*学*网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
"""
填写格式是 [协议]:// [地址] :[端口]填写之前不要忘记把USE_PROXY改成True如果直接在海外服务器部署此处不修改
<配置教程&视频教程> https://github.com/binary-husky/gpt_academic/issues/1>
[协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http
[地址] 懂的都懂不懂就填localhost或者127.0.0.1肯定错不了localhost意思是代理软件安装在本机上
[端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
"""
# 代理网络的地址,打开你的*学*网软件查看代理的协议(socks5h / http)、地址(localhost)和端口(11284)
proxies = {
# [协议]:// [地址] :[端口]
"http": "socks5h://localhost:11284", # 再例如 "http": "http://127.0.0.1:7890",
@@ -19,65 +30,104 @@ if USE_PROXY:
else:
proxies = None
# [step 3]>> 多线程函数插件中默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次Pay-as-you-go users的限制是每分钟3500次
# 一言以蔽之免费用户填3OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询https://platform.openai.com/docs/guides/rate-limits/overview
# ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------
# 重新URL重新定向实现更换API_URL的作用常规情况下不要修改!! 高危设置通过修改此设置您将把您的API-KEY和对话隐私完全暴露给您设定的中间人
# 格式 API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
# 例如 API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions":"https://reverse-proxy-url/v1/chat/completions"}
API_URL_REDIRECT = {}
# 多线程函数插件中默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次Pay-as-you-go users的限制是每分钟3500次
# 一言以蔽之免费5刀用户填3OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询https://platform.openai.com/docs/guides/rate-limits/overview
DEFAULT_WORKER_NUM = 3
# [step 4]>> 以下配置可以优化体验,但大部分场合下并不需要修改
# 对话窗的高度
CHATBOT_HEIGHT = 1115
# 代码高亮
CODE_HIGHLIGHT = True
# 窗口布局
LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
DARK_MODE = True # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
DARK_MODE = True # 暗色模式 / 亮色模式
# 发送请求到OpenAI后等待多久判定为超时
TIMEOUT_SECONDS = 30
# 网页的端口, -1代表随机端口
WEB_PORT = -1
# 如果OpenAI不响应网络卡顿、代理失败、KEY失效重试的次数限制
MAX_RETRY = 2
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 同时它必须被包含在AVAIL_LLM_MODELS切换列表中 )
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "newbing-free", "stack-claude"]
# P.S. 其他可用的模型还包括 ["newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
# P.S. 其他可用的模型还包括 ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
# 设置gradio的并行线程数不需要修改
CONCURRENT_COUNT = 100
# 是否在提交时自动清空输入框
AUTO_CLEAR_TXT = False
# 色彩主体,可选 ["Default", "Chuanhu-Small-and-Beautiful"]
THEME = "Default"
# 加一个live2d装饰
ADD_WAIFU = False
# 设置用户名和密码不需要修改相关功能不稳定与gradio版本和网络都相关如果本地使用不建议加这个
# [("username", "password"), ("username2", "password2"), ...]
AUTHENTICATION = []
# 重新URL重新定向实现更换API_URL的作用常规情况下不要修改!!
# 高危设置通过修改此设置您将把您的API-KEY和对话隐私完全暴露给您设定的中间人
# 格式 {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
# 例如 API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://ai.open.com/api/conversation"}
API_URL_REDIRECT = {}
# 如果需要在二级路径下运行(常规情况下,不要修改!!需要配合修改main.py才能生效!
CUSTOM_PATH = "/"
# 如果需要使用newbing把newbing的长长的cookie放到这里
NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
# 从现在起,如果您调用"newbing-free"模型则无需填写NEWBING_COOKIES
NEWBING_COOKIES = """
your bing cookies here
"""
# 极少数情况下openai的官方KEY需要伴随组织编码格式如org-xxxxxxxxxxxxxxxxxxxxxxxx使用
API_ORG = ""
# 如果需要使用Slack Claude使用教程详情见 request_llm/README.md
SLACK_CLAUDE_BOT_ID = ''
SLACK_CLAUDE_USER_TOKEN = ''
# 如果需要使用AZURE 详情请见额外文档 docs\use_azure.md
AZURE_ENDPOINT = "https://你亲手写的api名称.openai.azure.com/"
AZURE_API_KEY = "填入azure openai api的密钥" # 建议直接在API_KEY处填写该选项即将被弃用
AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md
# 使用Newbing
NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
NEWBING_COOKIES = """
put your new bing cookies here
"""
# 阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https://help.aliyun.com/document_detail/450255.html
ENABLE_AUDIO = False
ALIYUN_TOKEN="" # 例如 f37f30e0f9934c34a992f6f64f7eba4f
ALIYUN_APPKEY="" # 例如 RoPlZrM88DnAFkZK
# ChatGLM Finetune Model Path
ChatGLM_PTUNING_CHECKPOINT = ""

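The config.py changes above document the override chain (environment variables > config_private.py > config.py), expand AVAIL_LLM_MODELS, and add options such as THEME, AUTO_CLEAR_TXT, the Azure settings, and the Aliyun audio settings. A minimal sketch of a hypothetical `config_private.py` that overrides a few of them follows; only the option names are taken from the diff, every value is a placeholder.

```python
# Hypothetical config_private.py (kept out of git) whose entries override the
# same-named options in config.py. All values below are placeholders.
API_KEY = "sk-openaikey1,sk-openaikey2"      # several keys may be listed, comma-separated
USE_PROXY = True
proxies = {
    "http":  "socks5h://localhost:11284",    # [协议]://[地址]:[端口] of the local proxy
    "https": "socks5h://localhost:11284",
}
LLM_MODEL = "gpt-3.5-turbo-16k"              # must also appear in AVAIL_LLM_MODELS
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5"]
THEME = "Chuanhu-Small-and-Beautiful"        # the new theme option
DEFAULT_WORKER_NUM = 3                       # free-tier friendly thread count
AUTO_CLEAR_TXT = True                        # clear the input box after submitting
```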
core_functional.py

@@ -63,6 +63,7 @@ def get_core_functions():
"Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL" +
r"然后请使用Markdown格式封装并且不要有反斜线不要用代码块。现在请按以下描述给我发送图片" + "\n\n",
"Suffix": r"",
"Visible": False,
},
"解释代码": {
"Prefix": r"请解释以下代码:" + "\n```\n",
@@ -73,6 +74,5 @@ def get_core_functions():
r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
r"Items need to be transformed:",
"Suffix": r"",
"Visible": False,
}
}

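The two hunks above add `"Visible": False` to the Unsplash image prompt and drop it from the reference-style conversion entry, so the latter becomes visible while the former stays registered but hidden. For reference, here is a self-contained sketch of the entry format consumed by `get_core_functions()`; the button name and prompt text are invented, only the Prefix/Suffix/Visible keys come from the diff.

```python
# Sketch of a core_functional.py-style entry. "学术润色" and its prompt are
# hypothetical; Prefix/Suffix/Visible mirror the keys shown in the diff above.
def get_extra_core_functions() -> dict:
    return {
        "学术润色": {
            # Prepended silently to whatever the user typed
            "Prefix": "请对下面的段落进行学术润色,不改变原意,只输出润色后的文本:\n\n",
            # Appended after the user input (empty here)
            "Suffix": "",
            # False registers the prompt without showing its button,
            # as the hunk above does for the Unsplash image prompt
            "Visible": True,
        },
    }

if __name__ == "__main__":
    entry = get_extra_core_functions()["学术润色"]
    print(entry["Prefix"] + "用户输入的文本..." + entry["Suffix"])
```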
crazy_functional.py

@@ -112,11 +112,11 @@ def get_crazy_functions():
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(解析项目本身)
},
"[老旧的Demo] 把本项目源代码切换成全英文": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(全项目切换英文)
},
# "[老旧的Demo] 把本项目源代码切换成全英文": {
# # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
# "AsButton": False, # 加入下拉菜单中
# "Function": HotReload(全项目切换英文)
# },
"[插件demo] 历史上的今天": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Function": HotReload(高阶功能模板函数)
@@ -126,7 +126,7 @@ def get_crazy_functions():
###################### 第二组插件 ###########################
# [第二组插件]: 经过充分测试
from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
# from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
@@ -152,17 +152,16 @@ def get_crazy_functions():
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Function": HotReload(批量总结PDF文档)
},
"[测试功能] 批量总结PDF文档pdfminer": {
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(批量总结PDF文档pdfminer)
},
# "[测试功能] 批量总结PDF文档pdfminer": {
# "Color": "stop",
# "AsButton": False, # 加入下拉菜单中
# "Function": HotReload(批量总结PDF文档pdfminer)
# },
"谷歌学术检索助手输入谷歌学术搜索页url": {
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(谷歌检索小助手)
},
"理解PDF文档内容 模仿ChatPDF": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Color": "stop",
@@ -181,7 +180,7 @@ def get_crazy_functions():
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(Latex英文纠错)
},
"[测试功能] 中文Latex项目全文润色输入路径或上传压缩包": {
"中文Latex项目全文润色输入路径或上传压缩包": {
# HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
@@ -210,7 +209,9 @@ def get_crazy_functions():
})
###################### 第三组插件 ###########################
# [第三组插件]: 尚未充分测试的函数插件,放在这里
# [第三组插件]: 尚未充分测试的函数插件
try:
from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
function_plugins.update({
"一键下载arxiv论文并翻译摘要先在input输入编号如1812.10695": {
@@ -219,16 +220,30 @@ def get_crazy_functions():
"Function": HotReload(下载arxiv论文并翻译摘要)
}
})
except:
print('Load function plugin failed')
try:
from crazy_functions.联网的ChatGPT import 连接网络回答问题
function_plugins.update({
"连接网络回答问题(输入问题,再点击按钮,需要访问谷歌)": {
"连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(连接网络回答问题)
}
})
from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
function_plugins.update({
"连接网络回答问题中文Bing版输入问题后点击该插件": {
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Function": HotReload(连接bing搜索回答问题)
}
})
except:
print('Load function plugin failed')
try:
from crazy_functions.解析项目源代码 import 解析任意code项目
function_plugins.update({
"解析项目源代码(手动指定和筛选源代码文件类型)": {
@@ -239,6 +254,10 @@ def get_crazy_functions():
"Function": HotReload(解析任意code项目)
},
})
except:
print('Load function plugin failed')
try:
from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
function_plugins.update({
"询问多个GPT模型手动指定询问哪些模型": {
@@ -249,6 +268,10 @@ def get_crazy_functions():
"Function": HotReload(同时问询_指定模型)
},
})
except:
print('Load function plugin failed')
try:
from crazy_functions.图片生成 import 图片生成
function_plugins.update({
"图片生成先切换模型到openai或api2d": {
@@ -259,6 +282,10 @@ def get_crazy_functions():
"Function": HotReload(图片生成)
},
})
except:
print('Load function plugin failed')
try:
from crazy_functions.总结音视频 import 总结音视频
function_plugins.update({
"批量总结音视频(输入路径或上传压缩包)": {
@@ -269,6 +296,9 @@ def get_crazy_functions():
"Function": HotReload(总结音视频)
}
})
except:
print('Load function plugin failed')
try:
from crazy_functions.数学动画生成manim import 动画生成
function_plugins.update({
@@ -295,5 +325,125 @@ def get_crazy_functions():
except:
print('Load function plugin failed')
###################### 第n组插件 ###########################
try:
from crazy_functions.Langchain知识库 import 知识库问答
function_plugins.update({
"[功能尚不稳定] 构建知识库(请先上传文件素材)": {
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "待注入的知识库名称id, 默认为default",
"Function": HotReload(知识库问答)
}
})
except:
print('Load function plugin failed')
try:
from crazy_functions.Langchain知识库 import 读取知识库作答
function_plugins.update({
"[功能尚不稳定] 知识库问答": {
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要首先调用构建知识库",
"Function": HotReload(读取知识库作答)
}
})
except:
print('Load function plugin failed')
try:
from crazy_functions.交互功能函数模板 import 交互功能模板函数
function_plugins.update({
"交互功能模板函数": {
"Color": "stop",
"AsButton": False,
"Function": HotReload(交互功能模板函数)
}
})
except:
print('Load function plugin failed')
# try:
# from crazy_functions.chatglm微调工具 import 微调数据集生成
# function_plugins.update({
# "黑盒模型学习: 微调数据集生成 (先上传数据集)": {
# "Color": "stop",
# "AsButton": False,
# "AdvancedArgs": True,
# "ArgsReminder": "针对数据集输入(如 绿帽子*深蓝色衬衫*黑色运动裤)给出指令,例如您可以将以下命令复制到下方: --llm_to_learn=azure-gpt-3.5 --prompt_prefix='根据下面的服装类型提示想象一个穿着者对这个人外貌、身处的环境、内心世界、过去经历进行描写。要求100字以内用第二人称。' --system_prompt=''",
# "Function": HotReload(微调数据集生成)
# }
# })
# except:
# print('Load function plugin failed')
try:
from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
function_plugins.update({
"Latex英文纠错+高亮修正位置 [需Latex]": {
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
"Function": HotReload(Latex英文纠错加PDF对比)
}
})
from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
function_plugins.update({
"Arixv翻译输入arxivID[需Latex]": {
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder":
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Function": HotReload(Latex翻译中文并重新编译PDF)
}
})
function_plugins.update({
"本地论文翻译上传Latex压缩包[需Latex]": {
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder":
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Function": HotReload(Latex翻译中文并重新编译PDF)
}
})
except:
print('Load function plugin failed')
try:
from toolbox import get_conf
ENABLE_AUDIO, = get_conf('ENABLE_AUDIO')
if ENABLE_AUDIO:
from crazy_functions.语音助手 import 语音助手
function_plugins.update({
"实时音频采集": {
"Color": "stop",
"AsButton": True,
"Function": HotReload(语音助手)
}
})
except:
print('Load function plugin failed')
try:
from crazy_functions.虚空终端 import 终端
function_plugins.update({
"超级终端": {
"Color": "stop",
"AsButton": False,
# "AdvancedArgs": True,
# "ArgsReminder": "",
"Function": HotReload(终端)
}
})
except:
print('Load function plugin failed')
return function_plugins

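The restructuring above appears to wrap each optional plugin in its own try/except so that a missing dependency only prints "Load function plugin failed" instead of breaking startup, and it registers the new Langchain knowledge-base, cn.bing search, Latex/arxiv PDF, real-time audio, and 虚空终端 plugins. Below is a sketch of that registration pattern for a hypothetical new plugin; the module name and button label are invented, the key names follow the diff, and HotReload is assumed to be importable from toolbox (as get_conf is above).

```python
# Sketch of the plugin-registration pattern used throughout get_crazy_functions().
# crazy_functions.我的新插件 and 新插件主函数 are hypothetical; HotReload is assumed
# to come from toolbox, like get_conf in the diff above.
def register_optional_plugin(function_plugins: dict) -> None:
    try:
        from toolbox import HotReload
        from crazy_functions.我的新插件 import 新插件主函数  # hypothetical module
        function_plugins.update({
            "我的新插件(示例)": {
                "Color": "stop",        # button color
                "AsButton": False,      # keep it in the drop-down menu instead of a button
                "AdvancedArgs": True,   # expose the advanced-argument textbox
                "ArgsReminder": "在此处填写该插件的高级参数",
                "Function": HotReload(新插件主函数),  # hot-reloadable, like the entries above
            }
        })
    except Exception:
        # Mirrors the diff: a failed import must not break the rest of the plugin list
        print('Load function plugin failed')
```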
crazy_functions/Langchain知识库.py (new file)

@@ -0,0 +1,107 @@
from toolbox import CatchException, update_ui, ProxyNetworkActivate
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything
@CatchException
def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
plugin_kwargs 插件模型的参数,暂时没有用武之地
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
"""
history = [] # 清空历史,以免输入溢出
chatbot.append(("这是什么功能?", "[Local Message] 从一批文件(txt, md, tex)中读取数据构建知识库, 然后进行问答。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# resolve deps
try:
from zh_langchain import construct_vector_store
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from .crazy_utils import knowledge_archive_interface
except Exception as e:
chatbot.append(
["依赖不足",
"导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."]
)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
from .crazy_utils import try_install_deps
try_install_deps(['zh_langchain==0.2.1'])
# < --------------------读取参数--------------- >
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
kai_id = plugin_kwargs.get("advanced_arg", 'default')
# < --------------------读取文件--------------- >
file_manifest = []
spl = ["txt", "doc", "docx", "email", "epub", "html", "json", "md", "msg", "pdf", "ppt", "pptx", "rtf"]
for sp in spl:
_, file_manifest_tmp, _ = get_files_from_everything(txt, type=f'.{sp}')
file_manifest += file_manifest_tmp
if len(file_manifest) == 0:
chatbot.append(["没有找到任何可读取文件", "当前支持的格式包括: txt, md, docx, pptx, pdf, json等"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# < -------------------预热文本向量化模组--------------- >
chatbot.append(['<br/>'.join(file_manifest), "正在预热文本向量化模组, 如果是第一次运行, 将消耗较长时间下载中文向量化模型..."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
print('Checking Text2vec ...')
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
with ProxyNetworkActivate(): # 临时地激活代理网络
HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
# < -------------------构建知识库--------------- >
chatbot.append(['<br/>'.join(file_manifest), "正在构建知识库..."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
print('Establishing knowledge archive ...')
with ProxyNetworkActivate(): # 临时地激活代理网络
kai = knowledge_archive_interface()
kai.feed_archive(file_manifest=file_manifest, id=kai_id)
kai_files = kai.get_loaded_file()
kai_files = '<br/>'.join(kai_files)
# chatbot.append(['知识库构建成功', "正在将知识库存储至cookie中"])
# yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# chatbot._cookies['langchain_plugin_embedding'] = kai.get_current_archive_id()
# chatbot._cookies['lock_plugin'] = 'crazy_functions.Langchain知识库->读取知识库作答'
# chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出知识库问答模式。"])
chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“知识库问答”插件进行知识库访问, 或者使用此插件继续上传更多文件。"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间我们先及时地做一次界面更新
@CatchException
def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
# resolve deps
try:
from zh_langchain import construct_vector_store
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from .crazy_utils import knowledge_archive_interface
except Exception as e:
chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
from .crazy_utils import try_install_deps
try_install_deps(['zh_langchain==0.2.1'])
# < ------------------- --------------- >
kai = knowledge_archive_interface()
if 'langchain_plugin_embedding' in chatbot._cookies:
resp, prompt = kai.answer_with_archive_by_id(txt, chatbot._cookies['langchain_plugin_embedding'])
else:
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
kai_id = plugin_kwargs.get("advanced_arg", 'default')
resp, prompt = kai.answer_with_archive_by_id(txt, kai_id)
chatbot.append((txt, '[Local Message] ' + prompt))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间我们先及时地做一次界面更新
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=prompt, inputs_show_user=txt,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
sys_prompt=system_prompt
)
history.extend((prompt, gpt_say))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间我们先及时地做一次界面更新

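Outside of the Gradio UI, the knowledge-archive helper used above can be driven directly with the same three calls that appear in the plugin (feed_archive, get_loaded_file, answer_with_archive_by_id). A rough sketch follows; the absolute import path, the file list, and the question are assumptions.

```python
# Rough sketch of driving the knowledge-archive helper shown above without the UI.
# The import path (crazy_functions.crazy_utils) mirrors the plugin's relative
# import and is an assumption; the file paths and question are placeholders.
from crazy_functions.crazy_utils import knowledge_archive_interface

kai = knowledge_archive_interface()
# Build (or extend) the archive stored under the id "default"
kai.feed_archive(file_manifest=["docs/demo.md", "docs/notes.txt"], id="default")
print(kai.get_loaded_file())             # files currently indexed in the archive

# Retrieve an augmented prompt for a question against the same archive id
resp, prompt = kai.answer_with_archive_by_id("这批文档的主要结论是什么?", "default")
print(prompt)                            # the prompt that 读取知识库作答 would send to the LLM
```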
crazy_functions/Latex全文润色.py

@@ -238,3 +238,6 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='proofread')

crazy_functions/Latex输出PDF结果.py (new file)

@@ -0,0 +1,300 @@
from toolbox import update_ui, trimmed_format_exc, get_conf, objdump, objload, promote_file_to_downloadzone
from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip_result, gen_time_str
from functools import partial
import glob, os, requests, time
pj = os.path.join
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
# =================================== 工具函数 ===============================================
专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
def switch_prompt(pfg, mode, more_requirement):
"""
Generate prompts and system prompts based on the mode for proofreading or translating.
Args:
- pfg: Proofreader or Translator instance.
- mode: A string specifying the mode, either 'proofread' or 'translate_zh'.
Returns:
- inputs_array: A list of strings containing prompts for users to respond to.
- sys_prompt_array: A list of strings containing prompts for system prompts.
"""
n_split = len(pfg.sp_file_contents)
if mode == 'proofread_en':
inputs_array = [r"Below is a section from an academic paper, proofread this section." +
r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + more_requirement +
r"Answer me only with the revised text:" +
f"\n\n{frag}" for frag in pfg.sp_file_contents]
sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
elif mode == 'translate_zh':
inputs_array = [r"Below is a section from an English academic paper, translate it into Chinese. " + more_requirement +
r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " +
r"Answer me only with the translated text:" +
f"\n\n{frag}" for frag in pfg.sp_file_contents]
sys_prompt_array = ["You are a professional translator." for _ in range(n_split)]
else:
assert False, "未知指令"
return inputs_array, sys_prompt_array
def desend_to_extracted_folder_if_exist(project_folder):
"""
Descend into the extracted folder if it exists, otherwise return the original folder.
Args:
- project_folder: A string specifying the folder path.
Returns:
- A string specifying the path to the extracted folder, or the original folder if there is no extracted folder.
"""
maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)]
if len(maybe_dir) == 0: return project_folder
if maybe_dir[0].endswith('.extract'): return maybe_dir[0]
return project_folder
def move_project(project_folder, arxiv_id=None):
"""
Create a new work folder and copy the project folder to it.
Args:
- project_folder: A string specifying the folder path of the project.
Returns:
- A string specifying the path to the new work folder.
"""
import shutil, time
time.sleep(2) # avoid time string conflict
if arxiv_id is not None:
new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder')
else:
new_workfolder = f'gpt_log/{gen_time_str()}'
try:
shutil.rmtree(new_workfolder)
except:
pass
# align subfolder if there is a folder wrapper
items = glob.glob(pj(project_folder,'*'))
if len(glob.glob(pj(project_folder,'*.tex'))) == 0 and len(items) == 1:
if os.path.isdir(items[0]): project_folder = items[0]
shutil.copytree(src=project_folder, dst=new_workfolder)
return new_workfolder
def arxiv_download(chatbot, history, txt):
def check_cached_translation_pdf(arxiv_id):
translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation')
if not os.path.exists(translation_dir):
os.makedirs(translation_dir)
target_file = pj(translation_dir, 'translate_zh.pdf')
if os.path.exists(target_file):
promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot)
return target_file
return False
def is_float(s):
try:
float(s)
return True
except ValueError:
return False
if ('.' in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID
txt = 'https://arxiv.org/abs/' + txt.strip()
if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID
txt = 'https://arxiv.org/abs/' + txt[:10]
if not txt.startswith('https://arxiv.org'):
return txt, None
# <-------------- inspect format ------------->
chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...'])
yield from update_ui(chatbot=chatbot, history=history)
time.sleep(1) # 刷新界面
url_ = txt # https://arxiv.org/abs/1707.06690
if not txt.startswith('https://arxiv.org/abs/'):
msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}"
yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
return msg, None
# <-------------- set format ------------->
arxiv_id = url_.split('/abs/')[-1]
if 'v' in arxiv_id: arxiv_id = arxiv_id[:10]
cached_translation_pdf = check_cached_translation_pdf(arxiv_id)
if cached_translation_pdf: return cached_translation_pdf, arxiv_id
url_tar = url_.replace('/abs/', '/e-print/')
translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print')
extract_dst = pj(ARXIV_CACHE_DIR, arxiv_id, 'extract')
os.makedirs(translation_dir, exist_ok=True)
# <-------------- download arxiv source file ------------->
dst = pj(translation_dir, arxiv_id+'.tar')
if os.path.exists(dst):
yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面
else:
yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面
proxies, = get_conf('proxies')
r = requests.get(url_tar, proxies=proxies)
with open(dst, 'wb+') as f:
f.write(r.content)
# <-------------- extract file ------------->
yield from update_ui_lastest_msg("下载完成", chatbot=chatbot, history=history) # 刷新界面
from toolbox import extract_archive
extract_archive(file_path=dst, dest_dir=extract_dst)
return extract_dst, arxiv_id
# ========================================= 插件主程序1 =====================================================
@CatchException
def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
# <-------------- information about this plugin ------------->
chatbot.append([ "函数插件功能?",
"对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4其他模型转化效果未知。目前对机器学习类文献转化效果最好其他类型文献转化效果未知。仅在Windows系统进行了测试其他操作系统表现未知。"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# <-------------- more requirements ------------->
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
more_req = plugin_kwargs.get("advanced_arg", "")
_switch_prompt_ = partial(switch_prompt, more_requirement=more_req)
# <-------------- check deps ------------->
try:
import glob, os, time, subprocess
subprocess.Popen(['pdflatex', '-version'])
from .latex_utils import Latex精细分解与转化, 编译Latex
except Exception as e:
chatbot.append([ f"解析项目: {txt}",
f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# <-------------- clear history and read input ------------->
history = []
if os.path.exists(txt):
project_folder = txt
else:
if txt == "": txt = '空空如也的输入栏'
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
if len(file_manifest) == 0:
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# <-------------- if is a zip/tar file ------------->
project_folder = desend_to_extracted_folder_if_exist(project_folder)
# <-------------- move latex project away from temp folder ------------->
project_folder = move_project(project_folder, arxiv_id=None)
# <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
if not os.path.exists(project_folder + '/merge_proofread_en.tex'):
yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
chatbot, history, system_prompt, mode='proofread_en', switch_prompt=_switch_prompt_)
# <-------------- compile PDF ------------->
success = yield from 编译Latex(chatbot, history, main_file_original='merge', main_file_modified='merge_proofread_en',
work_folder_original=project_folder, work_folder_modified=project_folder, work_folder=project_folder)
# <-------------- zip PDF ------------->
zip_res = zip_result(project_folder)
if success:
chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
else:
chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...'))
yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
# <-------------- we are done ------------->
return success
# ========================================= 插件主程序2 =====================================================
@CatchException
def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
# <-------------- information about this plugin ------------->
chatbot.append([
"函数插件功能?",
"对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳Linux下必须使用Docker安装详见项目主README.md。目前仅支持GPT3.5/GPT4其他模型转化效果未知。目前对机器学习类文献转化效果最好其他类型文献转化效果未知。"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# <-------------- more requirements ------------->
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
more_req = plugin_kwargs.get("advanced_arg", "")
_switch_prompt_ = partial(switch_prompt, more_requirement=more_req)
# <-------------- check deps ------------->
try:
import glob, os, time, subprocess
subprocess.Popen(['pdflatex', '-version'])
from .latex_utils import Latex精细分解与转化, 编译Latex
except Exception as e:
chatbot.append([ f"解析项目: {txt}",
f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# <-------------- clear history and read input ------------->
history = []
txt, arxiv_id = yield from arxiv_download(chatbot, history, txt)
if txt.endswith('.pdf'):
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
if os.path.exists(txt):
project_folder = txt
else:
if txt == "": txt = '空空如也的输入栏'
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
if len(file_manifest) == 0:
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# <-------------- if is a zip/tar file ------------->
project_folder = desend_to_extracted_folder_if_exist(project_folder)
# <-------------- move latex project away from temp folder ------------->
project_folder = move_project(project_folder, arxiv_id)
# <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
chatbot, history, system_prompt, mode='translate_zh', switch_prompt=_switch_prompt_)
# <-------------- compile PDF ------------->
success = yield from 编译Latex(chatbot, history, main_file_original='merge', main_file_modified='merge_translate_zh', mode='translate_zh',
work_folder_original=project_folder, work_folder_modified=project_folder, work_folder=project_folder)
# <-------------- zip PDF ------------->
zip_res = zip_result(project_folder)
if success:
chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
else:
chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...'))
yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
# <-------------- we are done ------------->
return success

View File

@ -0,0 +1,125 @@
from toolbox import CatchException, update_ui, promote_file_to_downloadzone
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
import datetime, json
def fetch_items(list_of_items, batch_size):
for i in range(0, len(list_of_items), batch_size):
yield list_of_items[i:i + batch_size]
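# Usage sketch (illustrative): yields the list in consecutive batches; the last batch may be shorter.
#   >>> list(fetch_items([1, 2, 3, 4, 5], batch_size=2))
#   [[1, 2], [3, 4], [5]]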
def string_to_options(arguments):
import argparse
import shlex
# Create an argparse.ArgumentParser instance
parser = argparse.ArgumentParser()
# Add command-line arguments
parser.add_argument("--llm_to_learn", type=str, help="LLM model to learn", default="gpt-3.5-turbo")
parser.add_argument("--prompt_prefix", type=str, help="Prompt prefix", default='')
parser.add_argument("--system_prompt", type=str, help="System prompt", default='')
parser.add_argument("--batch", type=int, help="System prompt", default=50)
# Parse the arguments
args = parser.parse_args(shlex.split(arguments))
return args
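# Usage sketch (illustrative; the argument string mirrors the kind of "advanced_arg" passed from the UI,
# the prompt text is just an example value):
#   >>> opts = string_to_options("--llm_to_learn=gpt-3.5-turbo --prompt_prefix='改写下面这段话' --batch=10")
#   >>> opts.llm_to_learn, opts.batch
#   ('gpt-3.5-turbo', 10)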
@CatchException
def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
plugin_kwargs 插件模型的参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
"""
history = [] # 清空历史,以免输入溢出
chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
args = plugin_kwargs.get("advanced_arg", None)
if args is None:
chatbot.append(("没给定指令", "退出"))
yield from update_ui(chatbot=chatbot, history=history); return
else:
arguments = string_to_options(arguments=args)
dat = []
with open(txt, 'r', encoding='utf8') as f:
for line in f.readlines():
json_dat = json.loads(line)
dat.append(json_dat["content"])
llm_kwargs['llm_model'] = arguments.llm_to_learn
for batch in fetch_items(dat, arguments.batch):
res = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
inputs_array=[f"{arguments.prompt_prefix}\n\n{b}" for b in (batch)],
inputs_show_user_array=[f"Show Nothing" for _ in (batch)],
llm_kwargs=llm_kwargs,
chatbot=chatbot,
history_array=[[] for _ in (batch)],
sys_prompt_array=[arguments.system_prompt for _ in (batch)],
max_workers=10 # OpenAI所允许的最大并行过载
)
with open(txt+'.generated.json', 'a+', encoding='utf8') as f:
for b, r in zip(batch, res[1::2]):
f.write(json.dumps({"content":b, "summary":r}, ensure_ascii=False)+'\n')
promote_file_to_downloadzone(txt+'.generated.json', rename_file='generated.json', chatbot=chatbot)
return
def 启动微调(arguments):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
"""
history = [] # 清空历史,以免输入溢出
import subprocess
PRE_SEQ_LEN = 128
LR = 2e-2
NUM_GPUS = 1
JSON_FILE = 't_code.json'
tune_work_path = '/home/hmp/ChatGLM2-6B/ptuning'
command = f"torchrun --standalone --nnodes=1 --nproc-per-node={NUM_GPUS} main.py \
--do_train \
--train_file AdvertiseGen/{JSON_FILE} \
--validation_file AdvertiseGen/{JSON_FILE} \
--preprocessing_num_workers 20 \
--prompt_column content \
--response_column summary \
--overwrite_cache \
--model_name_or_path THUDM/chatglm2-6b \
--output_dir output/clothgen-chatglm2-6b-pt-{PRE_SEQ_LEN}-{LR} \
--overwrite_output_dir \
--max_source_length 256 \
--max_target_length 256 \
--per_device_train_batch_size 1 \
--per_device_eval_batch_size 1 \
--gradient_accumulation_steps 16 \
--predict_with_generate \
--max_steps 100 \
--logging_steps 10 \
--save_steps 20 \
--learning_rate {LR} \
--pre_seq_len {PRE_SEQ_LEN} \
--quantization_bit 4"
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=tune_work_path)
try:
stdout, stderr = process.communicate(timeout=3600*5)
except subprocess.TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
print("Process timed out!")
return False
return

View File

@ -4,16 +4,24 @@
运行方法 python crazy_functions/crazy_functions_test.py
"""
# ==============================================================================================================================
def validate_path():
import os, sys
dir_name = os.path.dirname(__file__)
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
os.chdir(root_dir_assume)
sys.path.append(root_dir_assume)
validate_path() # validate path so you can run from base directory
# ==============================================================================================================================
from colorful import *
from toolbox import get_conf, ChatBotWithCookies
import contextlib
import os
import sys
from functools import wraps
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
@ -30,7 +38,43 @@ history = []
system_prompt = "Serve me as a writing and programming assistant."
web_port = 1024
# ==============================================================================================================================
def silence_stdout(func):
@wraps(func)
def wrapper(*args, **kwargs):
# 将stdout重定向到os.devnull, 仅在每次yield期间临时恢复, 避免被测插件的打印刷屏
_original_stdout = sys.stdout
devnull = open(os.devnull, 'w')
sys.stdout = devnull
try:
for q in func(*args, **kwargs):
sys.stdout = _original_stdout
yield q
sys.stdout = devnull
finally:
devnull.close()
sys.stdout = _original_stdout
return wrapper
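# Usage sketch (illustrative): wrap a generator-style plugin so its prints are suppressed,
# except while each result is being yielded back to the caller.
#   >>> def noisy_gen():
#   ...     print("hidden"); yield 1; print("hidden too"); yield 2
#   >>> list(silence_stdout(noisy_gen)())
#   [1, 2]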
class CLI_Printer():
def __init__(self) -> None:
self.pre_buf = ""
def print(self, buf):
bufp = ""
for index, chat in enumerate(buf):
a, b = chat
bufp += sprint亮靛('[Me]:' + a) + '\n'
bufp += '[GPT]:' + b
if index < len(buf)-1:
bufp += '\n'
if self.pre_buf!="" and bufp.startswith(self.pre_buf):
print(bufp[len(self.pre_buf):], end='')
else:
print('\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'+bufp, end='')
self.pre_buf = bufp
return
cli_printer = CLI_Printer()
# ==============================================================================================================================
def test_解析一个Python项目():
from crazy_functions.解析项目源代码 import 解析一个Python项目
txt = "crazy_functions/test_project/python/dqn"
@ -116,8 +160,67 @@ def test_Markdown多语言():
for cookies, cb, hist, msg in Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
print(cb)
def test_Langchain知识库():
from crazy_functions.Langchain知识库 import 知识库问答
txt = "./"
chatbot = ChatBotWithCookies(llm_kwargs)
for cookies, cb, hist, msg in silence_stdout(知识库问答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
cli_printer.print(cb) # print(cb)
chatbot = ChatBotWithCookies(cookies)
from crazy_functions.Langchain知识库 import 读取知识库作答
txt = "What is the installation method"
for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
cli_printer.print(cb) # print(cb)
def test_Langchain知识库读取():
from crazy_functions.Langchain知识库 import 读取知识库作答
txt = "远程云服务器部署?"
for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
cli_printer.print(cb) # print(cb)
def test_Latex():
from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比, Latex翻译中文并重新编译PDF
# txt = r"https://arxiv.org/abs/1706.03762"
# txt = r"https://arxiv.org/abs/1902.03185"
# txt = r"https://arxiv.org/abs/2305.18290"
# txt = r"https://arxiv.org/abs/2305.17608"
# txt = r"https://arxiv.org/abs/2211.16068" # ACE
# txt = r"C:\Users\x\arxiv_cache\2211.16068\workfolder" # ACE
# txt = r"https://arxiv.org/abs/2002.09253"
# txt = r"https://arxiv.org/abs/2306.07831"
# txt = r"https://arxiv.org/abs/2212.10156"
# txt = r"https://arxiv.org/abs/2211.11559"
# txt = r"https://arxiv.org/abs/2303.08774"
# txt = r"https://arxiv.org/abs/2303.12712"
# txt = r"C:\Users\fuqingxu\arxiv_cache\2303.12712\workfolder"
txt = r"2306.17157" # 这个paper有个input命令文件名大小写错误
for cookies, cb, hist, msg in (Latex翻译中文并重新编译PDF)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
cli_printer.print(cb) # print(cb)
# txt = "2302.02948.tar"
# print(txt)
# main_tex, work_folder = Latex预处理(txt)
# print('main tex:', main_tex)
# res = 编译Latex(main_tex, work_folder)
# # for cookies, cb, hist, msg in silence_stdout(编译Latex)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
# cli_printer.print(cb) # print(cb)
def test_chatglm_finetune():
from crazy_functions.chatglm微调工具 import 微调数据集生成
txt = 'build/dev.json'
plugin_kwargs = {"advanced_arg":"--llm_to_learn=gpt-3.5-turbo --prompt_prefix='根据下面的服装类型提示想象一个穿着者对这个人外貌、身处的环境、内心世界、人设进行描写。要求100字以内用第二人称。' --system_prompt=''" }
for cookies, cb, hist, msg in (微调数据集生成)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
cli_printer.print(cb)
if __name__ == "__main__":
# test_解析一个Python项目()
# test_Latex英文润色()
# test_Markdown中译英()
@ -129,7 +232,9 @@ def test_Markdown多语言():
# test_联网回答问题()
# test_解析ipynb文件()
# test_数学动画生成manim()
test_Markdown多语言()
# test_Langchain知识库()
# test_Langchain知识库读取()
# test_Latex()
test_chatglm_finetune()
input("程序完成,回车退出。")
print("退出。")

View File

@ -1,4 +1,5 @@
from toolbox import update_ui, get_conf, trimmed_format_exc
import threading
def input_clipping(inputs, history, max_token_limit):
import numpy as np
@ -129,6 +130,11 @@ def request_gpt_model_in_new_thread_with_ui_alive(
yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
return final_result
def can_multi_process(llm):
if llm.startswith('gpt-'): return True
if llm.startswith('api2d-'): return True
if llm.startswith('azure-'): return True
return False
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
inputs_array, inputs_show_user_array, llm_kwargs,
@ -174,7 +180,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
except: max_workers = 8
if max_workers <= 0: max_workers = 3
# 屏蔽掉 chatglm的多线程可能会导致严重卡顿
if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-')):
if not can_multi_process(llm_kwargs['llm_model']):
max_workers = 1
executor = ThreadPoolExecutor(max_workers=max_workers)
@ -606,3 +612,142 @@ def get_files_from_everything(txt, type): # type='.md'
success = False
return success, file_manifest, project_folder
def Singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
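# Usage sketch (illustrative): the decorator caches one instance per decorated class,
# so constructor arguments are only honoured on the first call.
#   >>> @Singleton
#   ... class Cache():
#   ...     def __init__(self, tag="default"): self.tag = tag
#   >>> Cache("a") is Cache("b")
#   True
#   >>> Cache("b").tag
#   'a'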
@Singleton
class knowledge_archive_interface():
def __init__(self) -> None:
self.threadLock = threading.Lock()
self.current_id = ""
self.kai_path = None
self.qa_handle = None
self.text2vec_large_chinese = None
def get_chinese_text2vec(self):
if self.text2vec_large_chinese is None:
# < -------------------预热文本向量化模组--------------- >
from toolbox import ProxyNetworkActivate
print('Checking Text2vec ...')
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
with ProxyNetworkActivate(): # 临时地激活代理网络
self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
return self.text2vec_large_chinese
def feed_archive(self, file_manifest, id="default"):
self.threadLock.acquire()
# import uuid
self.current_id = id
from zh_langchain import construct_vector_store
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
files=file_manifest,
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
def get_current_archive_id(self):
return self.current_id
def get_loaded_file(self):
return self.qa_handle.get_loaded_file()
def answer_with_archive_by_id(self, txt, id):
self.threadLock.acquire()
if not self.current_id == id:
self.current_id = id
from zh_langchain import construct_vector_store
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
files=[],
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
VECTOR_SEARCH_SCORE_THRESHOLD = 0
VECTOR_SEARCH_TOP_K = 4
CHUNK_SIZE = 512
resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
query = txt,
vs_path = self.kai_path,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K,
chunk_conent=True,
chunk_size=CHUNK_SIZE,
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
return resp, prompt
def try_install_deps(deps):
for dep in deps:
import subprocess, sys
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', dep])
class construct_html():
def __init__(self) -> None:
self.css = """
.row {
display: flex;
flex-wrap: wrap;
}
.column {
flex: 1;
padding: 10px;
}
.table-header {
font-weight: bold;
border-bottom: 1px solid black;
}
.table-row {
border-bottom: 1px solid lightgray;
}
.table-cell {
padding: 5px;
}
"""
self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'
def add_row(self, a, b):
tmp = """
<div class="row table-row">
<div class="column table-cell">REPLACE_A</div>
<div class="column table-cell">REPLACE_B</div>
</div>
"""
from toolbox import markdown_convertion
tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
self.html_string += tmp
def save_file(self, file_name):
with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
f.write(self.html_string.encode('utf-8', 'ignore').decode())

View File

@ -0,0 +1,797 @@
from toolbox import update_ui, update_ui_lastest_msg # 刷新Gradio前端界面
from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone
import os, shutil
import re
import numpy as np
pj = os.path.join
"""
========================================================================
Part One
Latex segmentation with a binary mask (PRESERVE=0, TRANSFORM=1)
========================================================================
"""
PRESERVE = 0
TRANSFORM = 1
def set_forbidden_text(text, mask, pattern, flags=0):
"""
Add a preserve text area in this paper
e.g. with pattern = r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}"
you can mask out (mask = PRESERVE, so that the text becomes untouchable for GPT)
everything between "\begin{algorithm}" and "\end{algorithm}"
"""
if isinstance(pattern, list): pattern = '|'.join(pattern)
pattern_compile = re.compile(pattern, flags)
for res in pattern_compile.finditer(text):
mask[res.span()[0]:res.span()[1]] = PRESERVE
return text, mask
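# Usage sketch (illustrative): mark an equation environment as PRESERVE so GPT never touches it,
# mirroring the patterns used later in split_subprocess.
#   >>> doc = r"intro text \begin{equation} E=mc^2 \end{equation} the rest"
#   >>> m = np.zeros(len(doc), dtype=np.uint8) + TRANSFORM
#   >>> _, m = set_forbidden_text(doc, m, r"\\begin\{equation\}(.*?)\\end\{equation\}", re.DOTALL)
#   >>> bool((m[doc.index(r'\begin'): doc.index(r'\end')] == PRESERVE).all())
#   True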
def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
"""
Move an area out of the preserve area (make the text editable for GPT).
If forbid_wrapper is True, the wrapper commands themselves stay preserved.
e.g.
\begin{abstract} blablablablablabla. \end{abstract}
"""
if isinstance(pattern, list): pattern = '|'.join(pattern)
pattern_compile = re.compile(pattern, flags)
for res in pattern_compile.finditer(text):
if not forbid_wrapper:
mask[res.span()[0]:res.span()[1]] = TRANSFORM
else:
mask[res.regs[0][0]: res.regs[1][0]] = PRESERVE # e.g. '\\begin{abstract}'
mask[res.regs[1][0]: res.regs[1][1]] = TRANSFORM # the abstract body itself
mask[res.regs[1][1]: res.regs[0][1]] = PRESERVE # e.g. '\\end{abstract}'
return text, mask
def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
"""
Add a preserve text area in this paper (the text becomes untouchable for GPT).
Count the number of braces so as to catch the complete text area.
e.g.
\caption{blablablablabla\texbf{blablabla}blablabla.}
"""
pattern_compile = re.compile(pattern, flags)
for res in pattern_compile.finditer(text):
brace_level = -1
p = begin = end = res.regs[0][0]
for _ in range(1024*16):
if text[p] == '}' and brace_level == 0: break
elif text[p] == '}': brace_level -= 1
elif text[p] == '{': brace_level += 1
p += 1
end = p+1
mask[begin:end] = PRESERVE
return text, mask
def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wrapper=True):
"""
Move area out of preserve area (make text editable for GPT)
Count the number of braces so as to catch the complete text area.
e.g.
\caption{blablablablabla\texbf{blablabla}blablabla.}
"""
pattern_compile = re.compile(pattern, flags)
for res in pattern_compile.finditer(text):
brace_level = 0
p = begin = end = res.regs[1][0]
for _ in range(1024*16):
if text[p] == '}' and brace_level == 0: break
elif text[p] == '}': brace_level -= 1
elif text[p] == '{': brace_level += 1
p += 1
end = p
mask[begin:end] = TRANSFORM
if forbid_wrapper:
mask[res.regs[0][0]:begin] = PRESERVE
mask[end:res.regs[0][1]] = PRESERVE
return text, mask
def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42):
"""
Find all \begin{} ... \end{} text blocks with fewer than limit_n_lines lines.
Add them to the preserve area.
"""
pattern_compile = re.compile(pattern, flags)
def search_with_line_limit(text, mask):
for res in pattern_compile.finditer(text):
cmd = res.group(1) # begin{what}
this = res.group(2) # content between begin and end
this_mask = mask[res.regs[2][0]:res.regs[2][1]]
white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof',
'em', 'emph', 'textit', 'textbf', 'itemize', 'enumerate']
if (cmd in white_list) or this.count('\n') >= limit_n_lines: # use a magical number 42
this, this_mask = search_with_line_limit(this, this_mask)
mask[res.regs[2][0]:res.regs[2][1]] = this_mask
else:
mask[res.regs[0][0]:res.regs[0][1]] = PRESERVE
return text, mask
return search_with_line_limit(text, mask)
class LinkedListNode():
"""
Linked List Node
"""
def __init__(self, string, preserve=True) -> None:
self.string = string
self.preserve = preserve
self.next = None
# self.begin_line = 0
# self.begin_char = 0
def convert_to_linklist(text, mask):
root = LinkedListNode("", preserve=True)
current_node = root
for c, m, i in zip(text, mask, range(len(text))):
if (m==PRESERVE and current_node.preserve) \
or (m==TRANSFORM and not current_node.preserve):
# add
current_node.string += c
else:
current_node.next = LinkedListNode(c, preserve=(m==PRESERVE))
current_node = current_node.next
return root
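# Usage sketch (illustrative): consecutive characters sharing the same mask value are grouped
# into one node; nodes with preserve=True are later skipped when building GPT inputs.
#   >>> text = "keep THIS keep"
#   >>> mask = np.array([PRESERVE]*5 + [TRANSFORM]*4 + [PRESERVE]*5, dtype=np.uint8)
#   >>> node = convert_to_linklist(text, mask)
#   >>> segs = []
#   >>> while node is not None: segs.append((node.string, node.preserve)); node = node.next
#   >>> segs
#   [('keep ', True), ('THIS', False), (' keep', True)]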
"""
========================================================================
Latex Merge File
========================================================================
"""
def 寻找Latex主文件(file_manifest, mode):
"""
在多Tex文档中寻找主文件, 必须包含documentclass, 返回找到的第一个。
P.S. 但愿没人把latex模板放在里面传进来 (6.25 加入判定latex模板的代码)
"""
candidates = []
for texf in file_manifest:
if os.path.basename(texf).startswith('merge'):
continue
with open(texf, 'r', encoding='utf8') as f:
file_content = f.read()
if r'\documentclass' in file_content:
candidates.append(texf)
else:
continue
if len(candidates) == 0:
raise RuntimeError('无法找到一个主Tex文件(包含documentclass关键字)')
elif len(candidates) == 1:
return candidates[0]
else: # if len(candidates) >= 2, 通过一些Latex模板中常见(但通常不会出现在正文)的单词, 对不同latex源文件扣分, 取评分最高者返回
candidates_score = []
# 给出一些判定模板文档的词作为扣分项
unexpected_words = [r'\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers']
expected_words = [r'\input', r'\ref', r'\cite']
for texf in candidates:
candidates_score.append(0)
with open(texf, 'r', encoding='utf8') as f:
file_content = f.read()
for uw in unexpected_words:
if uw in file_content:
candidates_score[-1] -= 1
for uw in expected_words:
if uw in file_content:
candidates_score[-1] += 1
select = np.argmax(candidates_score) # 取评分最高者返回
return candidates[select]
def rm_comments(main_file):
new_file_remove_comment_lines = []
for l in main_file.splitlines():
# 删除整行的空注释
if l.lstrip().startswith("%"):
pass
else:
new_file_remove_comment_lines.append(l)
main_file = '\n'.join(new_file_remove_comment_lines)
# main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file) # 将 \include 命令转换为 \input 命令
main_file = re.sub(r'(?<!\\)%.*', '', main_file) # 使用正则表达式查找半行注释, 并替换为空字符串
return main_file
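# Behaviour sketch (illustrative): whole-line comments are dropped, trailing comments are stripped,
# but escaped percent signs (\%) survive.
#   >>> rm_comments("a = 1 % trailing comment\n% whole-line comment\n50\\% done")
#   'a = 1 \n50\\% done'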
def find_tex_file_ignore_case(fp):
dir_name = os.path.dirname(fp)
base_name = os.path.basename(fp)
if not base_name.endswith('.tex'): base_name+='.tex'
if os.path.exists(pj(dir_name, base_name)): return pj(dir_name, base_name)
# go case in-sensitive
import glob
for f in glob.glob(dir_name+'/*.tex'):
base_name_s = os.path.basename(f)
if base_name_s.lower() == base_name.lower(): return f
return None
def merge_tex_files_(project_foler, main_file, mode):
"""
Merge Tex project recursively
"""
main_file = rm_comments(main_file)
for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
f = s.group(1)
fp = os.path.join(project_foler, f)
fp_ = find_tex_file_ignore_case(fp)
if fp_:
with open(fp_, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
else:
raise RuntimeError(f'找不到 {fp}, Tex源文件缺失')
c = merge_tex_files_(project_foler, c, mode)
main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
return main_file
def merge_tex_files(project_foler, main_file, mode):
"""
Merge Tex project recursively
P.S. 顺便把CTEX塞进去以支持中文
P.S. 顺便把Latex的注释去除
"""
main_file = merge_tex_files_(project_foler, main_file, mode)
main_file = rm_comments(main_file)
if mode == 'translate_zh':
# find paper documentclass
pattern = re.compile(r'\\documentclass.*\n')
match = pattern.search(main_file)
assert match is not None, "Cannot find documentclass statement!"
position = match.end()
add_ctex = '\\usepackage{ctex}\n'
add_url = '\\usepackage{url}\n' if '{url}' not in main_file else ''
main_file = main_file[:position] + add_ctex + add_url + main_file[position:]
# fontset=windows
import platform
main_file = re.sub(r"\\documentclass\[(.*?)\]{(.*?)}", r"\\documentclass[\1,fontset=windows,UTF8]{\2}",main_file)
main_file = re.sub(r"\\documentclass{(.*?)}", r"\\documentclass[fontset=windows,UTF8]{\1}",main_file)
# find paper abstract
pattern_opt1 = re.compile(r'\\begin\{abstract\}.*\n')
pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
match_opt1 = pattern_opt1.search(main_file)
match_opt2 = pattern_opt2.search(main_file)
assert (match_opt1 is not None) or (match_opt2 is not None), "Cannot find paper abstract section!"
return main_file
"""
========================================================================
Post process
========================================================================
"""
def mod_inbraket(match):
"""
为啥chatgpt会把cite里面的逗号换成中文逗号呀
"""
# get the matched string
cmd = match.group(1)
str_to_modify = match.group(2)
# modify the matched string
str_to_modify = str_to_modify.replace('：', ':') # 前面是中文冒号, 后面是英文冒号
str_to_modify = str_to_modify.replace('，', ',') # 前面是中文逗号, 后面是英文逗号
# str_to_modify = 'BOOM'
return "\\" + cmd + "{" + str_to_modify + "}"
def fix_content(final_tex, node_string):
"""
Fix common GPT errors to increase success rate
"""
final_tex = re.sub(r"(?<!\\)%", "\\%", final_tex)
final_tex = re.sub(r"\\([a-z]{2,10})\ \{", r"\\\1{", string=final_tex)
final_tex = re.sub(r"\\\ ([a-z]{2,10})\{", r"\\\1{", string=final_tex)
final_tex = re.sub(r"\\([a-z]{2,10})\{([^\}]*?)\}", mod_inbraket, string=final_tex)
if "Traceback" in final_tex and "[Local Message]" in final_tex:
final_tex = node_string # 出问题了,还原原文
if node_string.count('\\begin') != final_tex.count('\\begin'):
final_tex = node_string # 出问题了,还原原文
if node_string.count('\_') > 0 and node_string.count('\_') > final_tex.count('\_'):
# walk and replace any _ without \
final_tex = re.sub(r"(?<!\\)_", "\\_", final_tex)
def compute_brace_level(string):
# this function count the number of { and }
brace_level = 0
for c in string:
if c == "{": brace_level += 1
elif c == "}": brace_level -= 1
return brace_level
def join_most(tex_t, tex_o):
# this function join translated string and original string when something goes wrong
p_t = 0
p_o = 0
def find_next(string, chars, begin):
p = begin
while p < len(string):
if string[p] in chars: return p, string[p]
p += 1
return None, None
while True:
res1, char = find_next(tex_o, ['{','}'], p_o)
if res1 is None: break
res2, char = find_next(tex_t, [char], p_t)
if res2 is None: break
p_o = res1 + 1
p_t = res2 + 1
return tex_t[:p_t] + tex_o[p_o:]
if compute_brace_level(final_tex) != compute_brace_level(node_string):
# 出问题了,还原部分原文,保证括号正确
final_tex = join_most(final_tex, node_string)
return final_tex
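# Behaviour sketch (illustrative): a stray space after the command and a full-width comma inside
# the braces are both repaired, with the original node text kept as the fallback reference.
#   >>> fix_content(r"\cite {ref1，ref2}", node_string=r"\cite{ref1,ref2}")
#   '\\cite{ref1,ref2}'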
def split_subprocess(txt, project_folder, return_dict, opts):
"""
break down the latex file into a linked list,
each node uses a preserve flag to indicate whether it should
be processed by GPT.
"""
text = txt
mask = np.zeros(len(txt), dtype=np.uint8) + TRANSFORM
# 吸收title与作者以上的部分
text, mask = set_forbidden_text(text, mask, r"(.*?)\\maketitle", re.DOTALL)
# 吸收iffalse注释
text, mask = set_forbidden_text(text, mask, r"\\iffalse(.*?)\\fi", re.DOTALL)
# 吸收在42行以内的begin-end组合
text, mask = set_forbidden_text_begin_end(text, mask, r"\\begin\{([a-z\*]*)\}(.*?)\\end\{\1\}", re.DOTALL, limit_n_lines=42)
# 吸收匿名公式
text, mask = set_forbidden_text(text, mask, [ r"\$\$([^$]+)\$\$", r"\\\[.*?\\\]" ], re.DOTALL)
# 吸收其他杂项
text, mask = set_forbidden_text(text, mask, [ r"\\section\{(.*?)\}", r"\\section\*\{(.*?)\}", r"\\subsection\{(.*?)\}", r"\\subsubsection\{(.*?)\}" ])
text, mask = set_forbidden_text(text, mask, [ r"\\bibliography\{(.*?)\}", r"\\bibliographystyle\{(.*?)\}" ])
text, mask = set_forbidden_text(text, mask, r"\\begin\{thebibliography\}.*?\\end\{thebibliography\}", re.DOTALL)
text, mask = set_forbidden_text(text, mask, r"\\begin\{lstlisting\}(.*?)\\end\{lstlisting\}", re.DOTALL)
text, mask = set_forbidden_text(text, mask, r"\\begin\{wraptable\}(.*?)\\end\{wraptable\}", re.DOTALL)
text, mask = set_forbidden_text(text, mask, r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}", re.DOTALL)
text, mask = set_forbidden_text(text, mask, [r"\\begin\{wrapfigure\}(.*?)\\end\{wrapfigure\}", r"\\begin\{wrapfigure\*\}(.*?)\\end\{wrapfigure\*\}"], re.DOTALL)
text, mask = set_forbidden_text(text, mask, [r"\\begin\{figure\}(.*?)\\end\{figure\}", r"\\begin\{figure\*\}(.*?)\\end\{figure\*\}"], re.DOTALL)
text, mask = set_forbidden_text(text, mask, [r"\\begin\{multline\}(.*?)\\end\{multline\}", r"\\begin\{multline\*\}(.*?)\\end\{multline\*\}"], re.DOTALL)
text, mask = set_forbidden_text(text, mask, [r"\\begin\{table\}(.*?)\\end\{table\}", r"\\begin\{table\*\}(.*?)\\end\{table\*\}"], re.DOTALL)
text, mask = set_forbidden_text(text, mask, [r"\\begin\{minipage\}(.*?)\\end\{minipage\}", r"\\begin\{minipage\*\}(.*?)\\end\{minipage\*\}"], re.DOTALL)
text, mask = set_forbidden_text(text, mask, [r"\\begin\{align\*\}(.*?)\\end\{align\*\}", r"\\begin\{align\}(.*?)\\end\{align\}"], re.DOTALL)
text, mask = set_forbidden_text(text, mask, [r"\\begin\{equation\}(.*?)\\end\{equation\}", r"\\begin\{equation\*\}(.*?)\\end\{equation\*\}"], re.DOTALL)
text, mask = set_forbidden_text(text, mask, [r"\\includepdf\[(.*?)\]\{(.*?)\}", r"\\clearpage", r"\\newpage", r"\\appendix", r"\\tableofcontents", r"\\include\{(.*?)\}"])
text, mask = set_forbidden_text(text, mask, [r"\\vspace\{(.*?)\}", r"\\hspace\{(.*?)\}", r"\\label\{(.*?)\}", r"\\begin\{(.*?)\}", r"\\end\{(.*?)\}", r"\\item "])
text, mask = set_forbidden_text_careful_brace(text, mask, r"\\hl\{(.*?)\}", re.DOTALL)
# reverse 操作必须放在最后
text, mask = reverse_forbidden_text_careful_brace(text, mask, r"\\caption\{(.*?)\}", re.DOTALL, forbid_wrapper=True)
text, mask = reverse_forbidden_text_careful_brace(text, mask, r"\\abstract\{(.*?)\}", re.DOTALL, forbid_wrapper=True)
text, mask = reverse_forbidden_text(text, mask, r"\\begin\{abstract\}(.*?)\\end\{abstract\}", re.DOTALL, forbid_wrapper=True)
root = convert_to_linklist(text, mask)
# 修复括号
node = root
while True:
string = node.string
if node.preserve:
node = node.next
if node is None: break
continue
def break_check(string):
str_stack = [""] # (lv, index)
for i, c in enumerate(string):
if c == '{':
str_stack.append('{')
elif c == '}':
if len(str_stack) == 1:
print('stack fix')
return i
str_stack.pop(-1)
else:
str_stack[-1] += c
return -1
bp = break_check(string)
if bp == -1:
pass
elif bp == 0:
node.string = string[:1]
q = LinkedListNode(string[1:], False)
q.next = node.next
node.next = q
else:
node.string = string[:bp]
q = LinkedListNode(string[bp:], False)
q.next = node.next
node.next = q
node = node.next
if node is None: break
# 屏蔽空行和太短的句子
node = root
while True:
if len(node.string.strip('\n').strip(' '))==0: node.preserve = True
if len(node.string.strip('\n').strip(' '))<42: node.preserve = True
node = node.next
if node is None: break
node = root
while True:
if node.next and node.preserve and node.next.preserve:
node.string += node.next.string
node.next = node.next.next
node = node.next
if node is None: break
# 将前后断行符脱离
node = root
prev_node = None
while True:
if not node.preserve:
lstriped_ = node.string.lstrip().lstrip('\n')
if (prev_node is not None) and (prev_node.preserve) and (len(lstriped_)!=len(node.string)):
prev_node.string += node.string[:-len(lstriped_)]
node.string = lstriped_
rstriped_ = node.string.rstrip().rstrip('\n')
if (node.next is not None) and (node.next.preserve) and (len(rstriped_)!=len(node.string)):
node.next.string = node.string[len(rstriped_):] + node.next.string
node.string = rstriped_
# =====
prev_node = node
node = node.next
if node is None: break
# 输出html调试文件, 用红色标注保留区(PRESERVE), 用黑色标注转换区(TRANSFORM)
with open(pj(project_folder, 'debug_log.html'), 'w', encoding='utf8') as f:
segment_parts_for_gpt = []
nodes = []
node = root
while True:
nodes.append(node)
show_html = node.string.replace('\n','<br/>')
if not node.preserve:
segment_parts_for_gpt.append(node.string)
f.write(f'<p style="color:black;">#{show_html}#</p>')
else:
f.write(f'<p style="color:red;">{show_html}</p>')
node = node.next
if node is None: break
for n in nodes: n.next = None # break
return_dict['nodes'] = nodes
return_dict['segment_parts_for_gpt'] = segment_parts_for_gpt
return return_dict
class LatexPaperSplit():
"""
break down the latex file into a linked list,
each node uses a preserve flag to indicate whether it should
be processed by GPT.
"""
def __init__(self) -> None:
self.nodes = None
self.msg = "*{\\scriptsize\\textbf{警告该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成" + \
"版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \
"项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
# 请您不要删除或修改这行警告, 除非您是论文的原作者 (如果您是论文原作者, 欢迎加README中的QQ联系开发者)
self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"
def merge_result(self, arr, mode, msg):
"""
Merge the result after the GPT process completed
"""
result_string = ""
p = 0
for node in self.nodes:
if node.preserve:
result_string += node.string
else:
result_string += fix_content(arr[p], node.string)
p += 1
if mode == 'translate_zh':
pattern = re.compile(r'\\begin\{abstract\}.*\n')
match = pattern.search(result_string)
if not match:
# match \abstract{xxxx}
pattern_compile = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
match = pattern_compile.search(result_string)
position = match.regs[1][0]
else:
# match \begin{abstract}xxxx\end{abstract}
position = match.end()
result_string = result_string[:position] + self.msg + msg + self.msg_declare + result_string[position:]
return result_string
def split(self, txt, project_folder, opts):
"""
break down the latex file into a linked list,
each node uses a preserve flag to indicate whether it should
be processed by GPT.
P.S. use multiprocessing to avoid timeout error
"""
import multiprocessing
manager = multiprocessing.Manager()
return_dict = manager.dict()
p = multiprocessing.Process(
target=split_subprocess,
args=(txt, project_folder, return_dict, opts))
p.start()
p.join()
p.close()
self.nodes = return_dict['nodes']
self.sp = return_dict['segment_parts_for_gpt']
return self.sp
class LatexPaperFileGroup():
"""
use tokenizer to break down text according to max_token_limit
"""
def __init__(self):
self.file_paths = []
self.file_contents = []
self.sp_file_contents = []
self.sp_file_index = []
self.sp_file_tag = []
# count_token
from request_llm.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
self.get_token_num = get_token_num
def run_file_split(self, max_token_limit=1900):
"""
use tokenizer to break down text according to max_token_limit
"""
for index, file_content in enumerate(self.file_contents):
if self.get_token_num(file_content) < max_token_limit:
self.sp_file_contents.append(file_content)
self.sp_file_index.append(index)
self.sp_file_tag.append(self.file_paths[index])
else:
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
for j, segment in enumerate(segments):
self.sp_file_contents.append(segment)
self.sp_file_index.append(index)
self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
print('Segmentation: done')
def merge_result(self):
self.file_result = ["" for _ in range(len(self.file_paths))]
for r, k in zip(self.sp_file_result, self.sp_file_index):
self.file_result[k] += r
def write_result(self):
manifest = []
for path, res in zip(self.file_paths, self.file_result):
with open(path + '.polish.tex', 'w', encoding='utf8') as f:
manifest.append(path + '.polish.tex')
f.write(res)
return manifest
def write_html(sp_file_contents, sp_file_result, chatbot, project_folder):
# write html
try:
import shutil
from .crazy_utils import construct_html
from toolbox import gen_time_str
ch = construct_html()
orig = ""
trans = ""
final = []
for c,r in zip(sp_file_contents, sp_file_result):
final.append(c)
final.append(r)
for i, k in enumerate(final):
if i%2==0:
orig = k
if i%2==1:
trans = k
ch.add_row(a=orig, b=trans)
create_report_file_name = f"{gen_time_str()}.trans.html"
ch.save_file(create_report_file_name)
shutil.copyfile(pj('./gpt_log/', create_report_file_name), pj(project_folder, create_report_file_name))
promote_file_to_downloadzone(file=f'./gpt_log/{create_report_file_name}', chatbot=chatbot)
except:
from toolbox import trimmed_format_exc
print('writing html result failed:', trimmed_format_exc())
def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, mode='proofread', switch_prompt=None, opts=[]):
import time, os, re
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from .latex_utils import LatexPaperFileGroup, merge_tex_files, LatexPaperSplit, 寻找Latex主文件
# <-------- 寻找主tex文件 ---------->
maintex = 寻找Latex主文件(file_manifest, mode)
chatbot.append((f"定位主Latex文件", f'[Local Message] 分析结果该项目的Latex主文件是{maintex}, 如果分析错误, 请立即终止程序, 删除或修改歧义文件, 然后重试。主程序即将开始, 请稍候。'))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
time.sleep(3)
# <-------- 读取Latex文件, 将多文件tex工程融合为一个巨型tex ---------->
main_tex_basename = os.path.basename(maintex)
assert main_tex_basename.endswith('.tex')
main_tex_basename_bare = main_tex_basename[:-4]
may_exist_bbl = pj(project_folder, f'{main_tex_basename_bare}.bbl')
if os.path.exists(may_exist_bbl):
shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge.bbl'))
shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_{mode}.bbl'))
shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_diff.bbl'))
with open(maintex, 'r', encoding='utf-8', errors='replace') as f:
content = f.read()
merged_content = merge_tex_files(project_folder, content, mode)
with open(project_folder + '/merge.tex', 'w', encoding='utf-8', errors='replace') as f:
f.write(merged_content)
# <-------- 精细切分latex文件 ---------->
chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件这需要一段时间计算文档越长耗时越长请耐心等待。'))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
lps = LatexPaperSplit()
res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数
# <-------- 拆分过长的latex片段 ---------->
pfg = LatexPaperFileGroup()
for index, r in enumerate(res):
pfg.file_paths.append('segment-' + str(index))
pfg.file_contents.append(r)
pfg.run_file_split(max_token_limit=1024)
n_split = len(pfg.sp_file_contents)
# <-------- 根据需要切换prompt ---------->
inputs_array, sys_prompt_array = switch_prompt(pfg, mode)
inputs_show_user_array = [f"{mode} {f}" for f in pfg.sp_file_tag]
if os.path.exists(pj(project_folder,'temp.pkl')):
# <-------- 【仅调试】如果存在调试缓存文件则跳过GPT请求环节 ---------->
pfg = objload(file=pj(project_folder,'temp.pkl'))
else:
# <-------- gpt 多线程请求 ---------->
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
inputs_array=inputs_array,
inputs_show_user_array=inputs_show_user_array,
llm_kwargs=llm_kwargs,
chatbot=chatbot,
history_array=[[""] for _ in range(n_split)],
sys_prompt_array=sys_prompt_array,
# max_workers=5, # 并行任务数量限制, 最多同时执行5个, 其他的排队等待
scroller_max_len = 40
)
# <-------- 文本碎片重组为完整的tex片段 ---------->
pfg.sp_file_result = []
for i_say, gpt_say, orig_content in zip(gpt_response_collection[0::2], gpt_response_collection[1::2], pfg.sp_file_contents):
pfg.sp_file_result.append(gpt_say)
pfg.merge_result()
# <-------- 临时存储用于调试 ---------->
pfg.get_token_num = None
objdump(pfg, file=pj(project_folder,'temp.pkl'))
write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder)
# <-------- 写出文件 ---------->
msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}"
final_tex = lps.merge_result(pfg.file_result, mode, msg)
with open(project_folder + f'/merge_{mode}.tex', 'w', encoding='utf-8', errors='replace') as f:
if mode != 'translate_zh' or "binary" in final_tex: f.write(final_tex)
# <-------- 整理结果, 退出 ---------->
chatbot.append((f"完成了吗?", 'GPT结果已输出, 正在编译PDF'))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# <-------- 返回 ---------->
return project_folder + f'/merge_{mode}.tex'
def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work_folder_modified):
try:
with open(log_path, 'r', encoding='utf-8', errors='replace') as f:
log = f.read()
with open(file_path, 'r', encoding='utf-8', errors='replace') as f:
file_lines = f.readlines()
import re
buggy_lines = re.findall(tex_name+':([0-9]{1,5}):', log)
buggy_lines = [int(l) for l in buggy_lines]
buggy_lines = sorted(buggy_lines)
print("removing lines that has errors", buggy_lines)
file_lines.pop(buggy_lines[0]-1)
with open(pj(work_folder_modified, f"{tex_name_pure}_fix_{n_fix}.tex"), 'w', encoding='utf-8', errors='replace') as f:
f.writelines(file_lines)
return True, f"{tex_name_pure}_fix_{n_fix}", buggy_lines
except:
print("Fatal error occurred, but we cannot identify error, please download zip, read latex log, and compile manually.")
return False, -1, [-1]
def compile_latex_with_timeout(command, cwd, timeout=60):
import subprocess
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
try:
stdout, stderr = process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
print("Process timed out!")
return False
return True
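# Usage sketch (illustrative; the path is hypothetical): returns True unless the command runs past
# the timeout, in which case the process is killed and False is returned.
#   ok = compile_latex_with_timeout('pdflatex -interaction=batchmode -file-line-error merge.tex',
#                                   cwd='/path/to/workfolder', timeout=60)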
def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_folder_original, work_folder_modified, work_folder, mode='default'):
import os, time
current_dir = os.getcwd()
n_fix = 1
max_try = 32
chatbot.append([f"正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder}如果程序停顿5分钟以上请直接去该路径下取回翻译结果或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history)
chatbot.append([f"正在编译PDF文档", '...']); yield from update_ui(chatbot=chatbot, history=history); time.sleep(1); chatbot[-1] = list(chatbot[-1]) # 刷新界面
yield from update_ui_lastest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面
while True:
import os
# https://stackoverflow.com/questions/738755/dont-make-me-manually-abort-a-latex-compile-when-theres-an-error
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译原始PDF ...', chatbot, history) # 刷新Gradio前端界面
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')):
# 只有第二步成功,才能继续下面的步骤
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面
if not os.path.exists(pj(work_folder_original, f'{main_file_original}.bbl')):
ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux', work_folder_original)
if not os.path.exists(pj(work_folder_modified, f'{main_file_modified}.bbl')):
ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux', work_folder_modified)
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # 刷新Gradio前端界面
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
if mode!='translate_zh':
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面
print( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex', cwd=current_dir)
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
# <---------- 检查结果 ----------->
results_ = ""
original_pdf_success = os.path.exists(pj(work_folder_original, f'{main_file_original}.pdf'))
modified_pdf_success = os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf'))
diff_pdf_success = os.path.exists(pj(work_folder, f'merge_diff.pdf'))
results_ += f"原始PDF编译是否成功: {original_pdf_success};"
results_ += f"转化PDF编译是否成功: {modified_pdf_success};"
results_ += f"对比PDF编译是否成功: {diff_pdf_success};"
yield from update_ui_lastest_msg(f'{n_fix}编译结束:<br/>{results_}...', chatbot, history) # 刷新Gradio前端界面
if diff_pdf_success:
result_pdf = pj(work_folder, 'merge_diff.pdf') # get pdf path
promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
if modified_pdf_success:
yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 即将退出 ...', chatbot, history) # 刷新Gradio前端界面
result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path
if os.path.exists(pj(work_folder, '..', 'translation')):
shutil.copyfile(result_pdf, pj(work_folder, '..', 'translation', 'translate_zh.pdf'))
promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
return True # 成功啦
else:
if n_fix>=max_try: break
n_fix += 1
can_retry, main_file_modified, buggy_lines = remove_buggy_lines(
file_path=pj(work_folder_modified, f'{main_file_modified}.tex'),
log_path=pj(work_folder_modified, f'{main_file_modified}.log'),
tex_name=f'{main_file_modified}.tex',
tex_name_pure=f'{main_file_modified}',
n_fix=n_fix,
work_folder_modified=work_folder_modified,
)
yield from update_ui_lastest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # 刷新Gradio前端界面
if not can_retry: break
return False # 失败啦

View File

@ -0,0 +1,89 @@
import time, threading, json
class AliyunASR():
def test_on_sentence_begin(self, message, *args):
# print("test_on_sentence_begin:{}".format(message))
pass
def test_on_sentence_end(self, message, *args):
# print("test_on_sentence_end:{}".format(message))
message = json.loads(message)
self.parsed_sentence = message['payload']['result']
self.event_on_entence_end.set()
print(self.parsed_sentence)
def test_on_start(self, message, *args):
# print("test_on_start:{}".format(message))
pass
def test_on_error(self, message, *args):
# print("on_error args=>{}".format(args))
pass
def test_on_close(self, *args):
# print("on_close: args=>{}".format(args))
pass
def test_on_result_chg(self, message, *args):
# print("test_on_chg:{}".format(message))
message = json.loads(message)
self.parsed_text = message['payload']['result']
self.event_on_result_chg.set()
def test_on_completed(self, message, *args):
# print("on_completed:args=>{} message=>{}".format(args, message))
pass
def audio_convertion_thread(self, uuid):
# 在一个异步线程中采集音频
import nls # pip install git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
import tempfile
from scipy import io
from toolbox import get_conf
from .audio_io import change_sample_rate
from .audio_io import RealtimeAudioDistribution
NEW_SAMPLERATE = 16000
rad = RealtimeAudioDistribution()
rad.clean_up()
temp_folder = tempfile.gettempdir()
TOKEN, APPKEY = get_conf('ALIYUN_TOKEN', 'ALIYUN_APPKEY')
URL="wss://nls-gateway.aliyuncs.com/ws/v1"
sr = nls.NlsSpeechTranscriber(
url=URL,
token=TOKEN,
appkey=APPKEY,
on_sentence_begin=self.test_on_sentence_begin,
on_sentence_end=self.test_on_sentence_end,
on_start=self.test_on_start,
on_result_changed=self.test_on_result_chg,
on_completed=self.test_on_completed,
on_error=self.test_on_error,
on_close=self.test_on_close,
callback_args=[uuid.hex]
)
r = sr.start(aformat="pcm",
enable_intermediate_result=True,
enable_punctuation_prediction=True,
enable_inverse_text_normalization=True)
while not self.stop:
# time.sleep(self.capture_interval)
audio = rad.read(uuid.hex)
if audio is not None:
# convert to pcm file
temp_file = f'{temp_folder}/{uuid.hex}.pcm' #
dsdata = change_sample_rate(audio, rad.rate, NEW_SAMPLERATE) # 48000 --> 16000
io.wavfile.write(temp_file, NEW_SAMPLERATE, dsdata)
# read pcm binary
with open(temp_file, "rb") as f: data = f.read()
# print('audio len:', len(audio), '\t ds len:', len(dsdata), '\t need n send:', len(data)//640)
slices = zip(*(iter(data),) * 640) # 640个字节为一组
for i in slices: sr.send_audio(bytes(i))
else:
time.sleep(0.1)
r = sr.stop()

View File

@ -0,0 +1,51 @@
import numpy as np
from scipy import interpolate
def Singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
@Singleton
class RealtimeAudioDistribution():
def __init__(self) -> None:
self.data = {}
self.max_len = 1024*1024
self.rate = 48000 # 只读,每秒采样数量
def clean_up(self):
self.data = {}
def feed(self, uuid, audio):
self.rate, audio_ = audio
# print('feed', len(audio_), audio_[-25:])
if uuid not in self.data:
self.data[uuid] = audio_
else:
new_arr = np.concatenate((self.data[uuid], audio_))
if len(new_arr) > self.max_len: new_arr = new_arr[-self.max_len:]
self.data[uuid] = new_arr
def read(self, uuid):
if uuid in self.data:
res = self.data.pop(uuid)
print('\r read-', len(res), '-', max(res), end='', flush=True)
else:
res = None
return res
def change_sample_rate(audio, old_sr, new_sr):
duration = audio.shape[0] / old_sr
time_old = np.linspace(0, duration, audio.shape[0])
time_new = np.linspace(0, duration, int(audio.shape[0] * new_sr / old_sr))
interpolator = interpolate.interp1d(time_old, audio.T)
new_audio = interpolator(time_new).T
return new_audio.astype(np.int16)
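# Usage sketch (illustrative): linearly resample two seconds of 48 kHz int16 audio down to the
# 16 kHz expected by the Aliyun ASR gateway; the output length shrinks by the same 3:1 ratio.
#   >>> audio_48k = np.zeros(96000, dtype=np.int16)
#   >>> change_sample_rate(audio_48k, old_sr=48000, new_sr=16000).shape
#   (32000,)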

View File

@ -1,87 +0,0 @@
#include "libipc/buffer.h"
#include "libipc/utility/pimpl.h"
#include <cstring>
namespace ipc {
bool operator==(buffer const & b1, buffer const & b2) {
return (b1.size() == b2.size()) && (std::memcmp(b1.data(), b2.data(), b1.size()) == 0);
}
bool operator!=(buffer const & b1, buffer const & b2) {
return !(b1 == b2);
}
class buffer::buffer_ : public pimpl<buffer_> {
public:
void* p_;
std::size_t s_;
void* a_;
buffer::destructor_t d_;
buffer_(void* p, std::size_t s, buffer::destructor_t d, void* a)
: p_(p), s_(s), a_(a), d_(d) {
}
~buffer_() {
if (d_ == nullptr) return;
d_((a_ == nullptr) ? p_ : a_, s_);
}
};
buffer::buffer()
: buffer(nullptr, 0, nullptr, nullptr) {
}
buffer::buffer(void* p, std::size_t s, destructor_t d)
: p_(p_->make(p, s, d, nullptr)) {
}
buffer::buffer(void* p, std::size_t s, destructor_t d, void* additional)
: p_(p_->make(p, s, d, additional)) {
}
buffer::buffer(void* p, std::size_t s)
: buffer(p, s, nullptr) {
}
buffer::buffer(char const & c)
: buffer(const_cast<char*>(&c), 1) {
}
buffer::buffer(buffer&& rhs)
: buffer() {
swap(rhs);
}
buffer::~buffer() {
p_->clear();
}
void buffer::swap(buffer& rhs) {
std::swap(p_, rhs.p_);
}
buffer& buffer::operator=(buffer rhs) {
swap(rhs);
return *this;
}
bool buffer::empty() const noexcept {
return (impl(p_)->p_ == nullptr) || (impl(p_)->s_ == 0);
}
void* buffer::data() noexcept {
return impl(p_)->p_;
}
void const * buffer::data() const noexcept {
return impl(p_)->p_;
}
std::size_t buffer::size() const noexcept {
return impl(p_)->s_;
}
} // namespace ipc

View File

@@ -1,701 +0,0 @@
#include <type_traits>
#include <cstring>
#include <algorithm>
#include <utility> // std::pair, std::move, std::forward
#include <atomic>
#include <type_traits> // aligned_storage_t
#include <string>
#include <vector>
#include <array>
#include <cassert>
#include "libipc/ipc.h"
#include "libipc/def.h"
#include "libipc/shm.h"
#include "libipc/pool_alloc.h"
#include "libipc/queue.h"
#include "libipc/policy.h"
#include "libipc/rw_lock.h"
#include "libipc/waiter.h"
#include "libipc/utility/log.h"
#include "libipc/utility/id_pool.h"
#include "libipc/utility/scope_guard.h"
#include "libipc/utility/utility.h"
#include "libipc/memory/resource.h"
#include "libipc/platform/detail.h"
#include "libipc/circ/elem_array.h"
namespace {
using msg_id_t = std::uint32_t;
using acc_t = std::atomic<msg_id_t>;
template <std::size_t DataSize, std::size_t AlignSize>
struct msg_t;
template <std::size_t AlignSize>
struct msg_t<0, AlignSize> {
msg_id_t cc_id_;
msg_id_t id_;
std::int32_t remain_;
bool storage_;
};
template <std::size_t DataSize, std::size_t AlignSize>
struct msg_t : msg_t<0, AlignSize> {
std::aligned_storage_t<DataSize, AlignSize> data_ {};
msg_t() = default;
msg_t(msg_id_t cc_id, msg_id_t id, std::int32_t remain, void const * data, std::size_t size)
: msg_t<0, AlignSize> {cc_id, id, remain, (data == nullptr) || (size == 0)} {
if (this->storage_) {
if (data != nullptr) {
// copy storage-id
*reinterpret_cast<ipc::storage_id_t*>(&data_) =
*static_cast<ipc::storage_id_t const *>(data);
}
}
else std::memcpy(&data_, data, size);
}
};
template <typename T>
ipc::buff_t make_cache(T& data, std::size_t size) {
auto ptr = ipc::mem::alloc(size);
std::memcpy(ptr, &data, (ipc::detail::min)(sizeof(data), size));
return { ptr, size, ipc::mem::free };
}
struct cache_t {
std::size_t fill_;
ipc::buff_t buff_;
cache_t(std::size_t f, ipc::buff_t && b)
: fill_(f), buff_(std::move(b))
{}
void append(void const * data, std::size_t size) {
if (fill_ >= buff_.size() || data == nullptr || size == 0) return;
auto new_fill = (ipc::detail::min)(fill_ + size, buff_.size());
std::memcpy(static_cast<ipc::byte_t*>(buff_.data()) + fill_, data, new_fill - fill_);
fill_ = new_fill;
}
};
auto cc_acc() {
static ipc::shm::handle acc_h("__CA_CONN__", sizeof(acc_t));
return static_cast<acc_t*>(acc_h.get());
}
IPC_CONSTEXPR_ std::size_t align_chunk_size(std::size_t size) noexcept {
return (((size - 1) / ipc::large_msg_align) + 1) * ipc::large_msg_align;
}
IPC_CONSTEXPR_ std::size_t calc_chunk_size(std::size_t size) noexcept {
return ipc::make_align(alignof(std::max_align_t), align_chunk_size(
ipc::make_align(alignof(std::max_align_t), sizeof(std::atomic<ipc::circ::cc_t>)) + size));
}
struct chunk_t {
std::atomic<ipc::circ::cc_t> &conns() noexcept {
return *reinterpret_cast<std::atomic<ipc::circ::cc_t> *>(this);
}
void *data() noexcept {
return reinterpret_cast<ipc::byte_t *>(this)
+ ipc::make_align(alignof(std::max_align_t), sizeof(std::atomic<ipc::circ::cc_t>));
}
};
struct chunk_info_t {
ipc::id_pool<> pool_;
ipc::spin_lock lock_;
IPC_CONSTEXPR_ static std::size_t chunks_mem_size(std::size_t chunk_size) noexcept {
return ipc::id_pool<>::max_count * chunk_size;
}
ipc::byte_t *chunks_mem() noexcept {
return reinterpret_cast<ipc::byte_t *>(this + 1);
}
chunk_t *at(std::size_t chunk_size, ipc::storage_id_t id) noexcept {
if (id < 0) return nullptr;
return reinterpret_cast<chunk_t *>(chunks_mem() + (chunk_size * id));
}
};
auto& chunk_storages() {
class chunk_handle_t {
ipc::shm::handle handle_;
public:
chunk_info_t *get_info(std::size_t chunk_size) {
if (!handle_.valid() &&
!handle_.acquire( ("__CHUNK_INFO__" + ipc::to_string(chunk_size)).c_str(),
sizeof(chunk_info_t) + chunk_info_t::chunks_mem_size(chunk_size) )) {
ipc::error("[chunk_storages] chunk_shm.id_info_.acquire failed: chunk_size = %zd\n", chunk_size);
return nullptr;
}
auto info = static_cast<chunk_info_t*>(handle_.get());
if (info == nullptr) {
ipc::error("[chunk_storages] chunk_shm.id_info_.get failed: chunk_size = %zd\n", chunk_size);
return nullptr;
}
return info;
}
};
static ipc::map<std::size_t, chunk_handle_t> chunk_hs;
return chunk_hs;
}
chunk_info_t *chunk_storage_info(std::size_t chunk_size) {
auto &storages = chunk_storages();
std::decay_t<decltype(storages)>::iterator it;
{
static ipc::rw_lock lock;
IPC_UNUSED_ std::shared_lock<ipc::rw_lock> guard {lock};
if ((it = storages.find(chunk_size)) == storages.end()) {
using chunk_handle_t = std::decay_t<decltype(storages)>::value_type::second_type;
guard.unlock();
IPC_UNUSED_ std::lock_guard<ipc::rw_lock> guard {lock};
it = storages.emplace(chunk_size, chunk_handle_t{}).first;
}
}
return it->second.get_info(chunk_size);
}
std::pair<ipc::storage_id_t, void*> acquire_storage(std::size_t size, ipc::circ::cc_t conns) {
std::size_t chunk_size = calc_chunk_size(size);
auto info = chunk_storage_info(chunk_size);
if (info == nullptr) return {};
info->lock_.lock();
info->pool_.prepare();
// get a unique id
auto id = info->pool_.acquire();
info->lock_.unlock();
auto chunk = info->at(chunk_size, id);
if (chunk == nullptr) return {};
chunk->conns().store(conns, std::memory_order_relaxed);
return { id, chunk->data() };
}
void *find_storage(ipc::storage_id_t id, std::size_t size) {
if (id < 0) {
ipc::error("[find_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
return nullptr;
}
std::size_t chunk_size = calc_chunk_size(size);
auto info = chunk_storage_info(chunk_size);
if (info == nullptr) return nullptr;
return info->at(chunk_size, id)->data();
}
void release_storage(ipc::storage_id_t id, std::size_t size) {
if (id < 0) {
ipc::error("[release_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
return;
}
std::size_t chunk_size = calc_chunk_size(size);
auto info = chunk_storage_info(chunk_size);
if (info == nullptr) return;
info->lock_.lock();
info->pool_.release(id);
info->lock_.unlock();
}
template <ipc::relat Rp, ipc::relat Rc>
bool sub_rc(ipc::wr<Rp, Rc, ipc::trans::unicast>,
std::atomic<ipc::circ::cc_t> &/*conns*/, ipc::circ::cc_t /*curr_conns*/, ipc::circ::cc_t /*conn_id*/) noexcept {
return true;
}
template <ipc::relat Rp, ipc::relat Rc>
bool sub_rc(ipc::wr<Rp, Rc, ipc::trans::broadcast>,
std::atomic<ipc::circ::cc_t> &conns, ipc::circ::cc_t curr_conns, ipc::circ::cc_t conn_id) noexcept {
auto last_conns = curr_conns & ~conn_id;
for (unsigned k = 0;;) {
auto chunk_conns = conns.load(std::memory_order_acquire);
if (conns.compare_exchange_weak(chunk_conns, chunk_conns & last_conns, std::memory_order_release)) {
return (chunk_conns & last_conns) == 0;
}
ipc::yield(k);
}
}
template <typename Flag>
void recycle_storage(ipc::storage_id_t id, std::size_t size, ipc::circ::cc_t curr_conns, ipc::circ::cc_t conn_id) {
if (id < 0) {
ipc::error("[recycle_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
return;
}
std::size_t chunk_size = calc_chunk_size(size);
auto info = chunk_storage_info(chunk_size);
if (info == nullptr) return;
auto chunk = info->at(chunk_size, id);
if (chunk == nullptr) return;
if (!sub_rc(Flag{}, chunk->conns(), curr_conns, conn_id)) {
return;
}
info->lock_.lock();
info->pool_.release(id);
info->lock_.unlock();
}
template <typename MsgT>
bool clear_message(void* p) {
auto msg = static_cast<MsgT*>(p);
if (msg->storage_) {
std::int32_t r_size = static_cast<std::int32_t>(ipc::data_length) + msg->remain_;
if (r_size <= 0) {
ipc::error("[clear_message] invalid msg size: %d\n", (int)r_size);
return true;
}
release_storage(
*reinterpret_cast<ipc::storage_id_t*>(&msg->data_),
static_cast<std::size_t>(r_size));
}
return true;
}
struct conn_info_head {
ipc::string name_;
msg_id_t cc_id_; // connection-info id
ipc::detail::waiter cc_waiter_, wt_waiter_, rd_waiter_;
ipc::shm::handle acc_h_;
conn_info_head(char const * name)
: name_ {name}
, cc_id_ {(cc_acc() == nullptr) ? 0 : cc_acc()->fetch_add(1, std::memory_order_relaxed)}
, cc_waiter_{("__CC_CONN__" + name_).c_str()}
, wt_waiter_{("__WT_CONN__" + name_).c_str()}
, rd_waiter_{("__RD_CONN__" + name_).c_str()}
, acc_h_ {("__AC_CONN__" + name_).c_str(), sizeof(acc_t)} {
}
void quit_waiting() {
cc_waiter_.quit_waiting();
wt_waiter_.quit_waiting();
rd_waiter_.quit_waiting();
}
auto acc() {
return static_cast<acc_t*>(acc_h_.get());
}
auto& recv_cache() {
thread_local ipc::unordered_map<msg_id_t, cache_t> tls;
return tls;
}
};
template <typename W, typename F>
bool wait_for(W& waiter, F&& pred, std::uint64_t tm) {
if (tm == 0) return !pred();
for (unsigned k = 0; pred();) {
bool ret = true;
ipc::sleep(k, [&k, &ret, &waiter, &pred, tm] {
ret = waiter.wait_if(std::forward<F>(pred), tm);
k = 0;
});
if (!ret) return false; // timeout or fail
if (k == 0) break; // k has been reset
}
return true;
}
template <typename Policy,
std::size_t DataSize = ipc::data_length,
std::size_t AlignSize = (ipc::detail::min)(DataSize, alignof(std::max_align_t))>
struct queue_generator {
using queue_t = ipc::queue<msg_t<DataSize, AlignSize>, Policy>;
struct conn_info_t : conn_info_head {
queue_t que_;
conn_info_t(char const * name)
: conn_info_head{name}
, que_{("__QU_CONN__" +
ipc::to_string(DataSize) + "__" +
ipc::to_string(AlignSize) + "__" + name).c_str()} {
}
void disconnect_receiver() {
bool dis = que_.disconnect();
this->quit_waiting();
if (dis) {
this->recv_cache().clear();
}
}
};
};
template <typename Policy>
struct detail_impl {
using policy_t = Policy;
using flag_t = typename policy_t::flag_t;
using queue_t = typename queue_generator<policy_t>::queue_t;
using conn_info_t = typename queue_generator<policy_t>::conn_info_t;
constexpr static conn_info_t* info_of(ipc::handle_t h) noexcept {
return static_cast<conn_info_t*>(h);
}
constexpr static queue_t* queue_of(ipc::handle_t h) noexcept {
return (info_of(h) == nullptr) ? nullptr : &(info_of(h)->que_);
}
/* API implementations */
static void disconnect(ipc::handle_t h) {
auto que = queue_of(h);
if (que == nullptr) {
return;
}
que->shut_sending();
assert(info_of(h) != nullptr);
info_of(h)->disconnect_receiver();
}
static bool reconnect(ipc::handle_t * ph, bool start_to_recv) {
assert(ph != nullptr);
assert(*ph != nullptr);
auto que = queue_of(*ph);
if (que == nullptr) {
return false;
}
if (start_to_recv) {
que->shut_sending();
if (que->connect()) { // wouldn't connect twice
info_of(*ph)->cc_waiter_.broadcast();
return true;
}
return false;
}
// start_to_recv == false
if (que->connected()) {
info_of(*ph)->disconnect_receiver();
}
return que->ready_sending();
}
static bool connect(ipc::handle_t * ph, char const * name, bool start_to_recv) {
assert(ph != nullptr);
if (*ph == nullptr) {
*ph = ipc::mem::alloc<conn_info_t>(name);
}
return reconnect(ph, start_to_recv);
}
static void destroy(ipc::handle_t h) {
disconnect(h);
ipc::mem::free(info_of(h));
}
static std::size_t recv_count(ipc::handle_t h) noexcept {
auto que = queue_of(h);
if (que == nullptr) {
return ipc::invalid_value;
}
return que->conn_count();
}
static bool wait_for_recv(ipc::handle_t h, std::size_t r_count, std::uint64_t tm) {
auto que = queue_of(h);
if (que == nullptr) {
return false;
}
return wait_for(info_of(h)->cc_waiter_, [que, r_count] {
return que->conn_count() < r_count;
}, tm);
}
template <typename F>
static bool send(F&& gen_push, ipc::handle_t h, void const * data, std::size_t size) {
if (data == nullptr || size == 0) {
ipc::error("fail: send(%p, %zd)\n", data, size);
return false;
}
auto que = queue_of(h);
if (que == nullptr) {
ipc::error("fail: send, queue_of(h) == nullptr\n");
return false;
}
if (que->elems() == nullptr) {
ipc::error("fail: send, queue_of(h)->elems() == nullptr\n");
return false;
}
if (!que->ready_sending()) {
ipc::error("fail: send, que->ready_sending() == false\n");
return false;
}
ipc::circ::cc_t conns = que->elems()->connections(std::memory_order_relaxed);
if (conns == 0) {
ipc::error("fail: send, there is no receiver on this connection.\n");
return false;
}
// calc a new message id
auto acc = info_of(h)->acc();
if (acc == nullptr) {
ipc::error("fail: send, info_of(h)->acc() == nullptr\n");
return false;
}
auto msg_id = acc->fetch_add(1, std::memory_order_relaxed);
auto try_push = std::forward<F>(gen_push)(info_of(h), que, msg_id);
if (size > ipc::large_msg_limit) {
auto dat = acquire_storage(size, conns);
void * buf = dat.second;
if (buf != nullptr) {
std::memcpy(buf, data, size);
return try_push(static_cast<std::int32_t>(size) -
static_cast<std::int32_t>(ipc::data_length), &(dat.first), 0);
}
// try using message fragment
//ipc::log("fail: shm::handle for big message. msg_id: %zd, size: %zd\n", msg_id, size);
}
// push message fragment
std::int32_t offset = 0;
for (std::int32_t i = 0; i < static_cast<std::int32_t>(size / ipc::data_length); ++i, offset += ipc::data_length) {
if (!try_push(static_cast<std::int32_t>(size) - offset - static_cast<std::int32_t>(ipc::data_length),
static_cast<ipc::byte_t const *>(data) + offset, ipc::data_length)) {
return false;
}
}
// if remain > 0, this is the last message fragment
std::int32_t remain = static_cast<std::int32_t>(size) - offset;
if (remain > 0) {
if (!try_push(remain - static_cast<std::int32_t>(ipc::data_length),
static_cast<ipc::byte_t const *>(data) + offset,
static_cast<std::size_t>(remain))) {
return false;
}
}
return true;
}
static bool send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
return send([tm](auto info, auto que, auto msg_id) {
return [tm, info, que, msg_id](std::int32_t remain, void const * data, std::size_t size) {
if (!wait_for(info->wt_waiter_, [&] {
return !que->push(
[](void*) { return true; },
info->cc_id_, msg_id, remain, data, size);
}, tm)) {
ipc::log("force_push: msg_id = %zd, remain = %d, size = %zd\n", msg_id, remain, size);
if (!que->force_push(
clear_message<typename queue_t::value_t>,
info->cc_id_, msg_id, remain, data, size)) {
return false;
}
}
info->rd_waiter_.broadcast();
return true;
};
}, h, data, size);
}
static bool try_send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
return send([tm](auto info, auto que, auto msg_id) {
return [tm, info, que, msg_id](std::int32_t remain, void const * data, std::size_t size) {
if (!wait_for(info->wt_waiter_, [&] {
return !que->push(
[](void*) { return true; },
info->cc_id_, msg_id, remain, data, size);
}, tm)) {
return false;
}
info->rd_waiter_.broadcast();
return true;
};
}, h, data, size);
}
static ipc::buff_t recv(ipc::handle_t h, std::uint64_t tm) {
auto que = queue_of(h);
if (que == nullptr) {
ipc::error("fail: recv, queue_of(h) == nullptr\n");
return {};
}
if (!que->connected()) {
// hasn't connected yet, just return.
return {};
}
auto& rc = info_of(h)->recv_cache();
for (;;) {
// pop a new message
typename queue_t::value_t msg;
if (!wait_for(info_of(h)->rd_waiter_, [que, &msg] {
return !que->pop(msg);
}, tm)) {
// pop failed, just return.
return {};
}
info_of(h)->wt_waiter_.broadcast();
if ((info_of(h)->acc() != nullptr) && (msg.cc_id_ == info_of(h)->cc_id_)) {
continue; // ignore message to self
}
// msg.remain_ may be negative, and abs(msg.remain_) < data_length
std::int32_t r_size = static_cast<std::int32_t>(ipc::data_length) + msg.remain_;
if (r_size <= 0) {
ipc::error("fail: recv, r_size = %d\n", (int)r_size);
return {};
}
std::size_t msg_size = static_cast<std::size_t>(r_size);
// large message
if (msg.storage_) {
ipc::storage_id_t buf_id = *reinterpret_cast<ipc::storage_id_t*>(&msg.data_);
void* buf = find_storage(buf_id, msg_size);
if (buf != nullptr) {
struct recycle_t {
ipc::storage_id_t storage_id;
ipc::circ::cc_t curr_conns;
ipc::circ::cc_t conn_id;
} *r_info = ipc::mem::alloc<recycle_t>(recycle_t{
buf_id, que->elems()->connections(std::memory_order_relaxed), que->connected_id()
});
if (r_info == nullptr) {
ipc::log("fail: ipc::mem::alloc<recycle_t>.\n");
return ipc::buff_t{buf, msg_size}; // no recycle
} else {
return ipc::buff_t{buf, msg_size, [](void* p_info, std::size_t size) {
auto r_info = static_cast<recycle_t *>(p_info);
IPC_UNUSED_ auto finally = ipc::guard([r_info] {
ipc::mem::free(r_info);
});
recycle_storage<flag_t>(r_info->storage_id, size, r_info->curr_conns, r_info->conn_id);
}, r_info};
}
} else {
ipc::log("fail: shm::handle for large message. msg_id: %zd, buf_id: %zd, size: %zd\n", msg.id_, buf_id, msg_size);
continue;
}
}
// find cache with msg.id_
auto cac_it = rc.find(msg.id_);
if (cac_it == rc.end()) {
if (msg_size <= ipc::data_length) {
return make_cache(msg.data_, msg_size);
}
// gc
if (rc.size() > 1024) {
std::vector<msg_id_t> need_del;
for (auto const & pair : rc) {
auto cmp = std::minmax(msg.id_, pair.first);
if (cmp.second - cmp.first > 8192) {
need_del.push_back(pair.first);
}
}
for (auto id : need_del) rc.erase(id);
}
// cache the first message fragment
rc.emplace(msg.id_, cache_t { ipc::data_length, make_cache(msg.data_, msg_size) });
}
// earlier fragments of this message are already cached
else {
auto& cac = cac_it->second;
// this is the last message fragment
if (msg.remain_ <= 0) {
cac.append(&(msg.data_), msg_size);
// finish this message, erase it from cache
auto buff = std::move(cac.buff_);
rc.erase(cac_it);
return buff;
}
// more fragments of this message remain after this one
cac.append(&(msg.data_), ipc::data_length);
}
}
}
static ipc::buff_t try_recv(ipc::handle_t h) {
return recv(h, 0);
}
}; // detail_impl<Policy>
template <typename Flag>
using policy_t = ipc::policy::choose<ipc::circ::elem_array, Flag>;
} // internal-linkage
namespace ipc {
template <typename Flag>
ipc::handle_t chan_impl<Flag>::inited() {
ipc::detail::waiter::init();
return nullptr;
}
template <typename Flag>
bool chan_impl<Flag>::connect(ipc::handle_t * ph, char const * name, unsigned mode) {
return detail_impl<policy_t<Flag>>::connect(ph, name, mode & receiver);
}
template <typename Flag>
bool chan_impl<Flag>::reconnect(ipc::handle_t * ph, unsigned mode) {
return detail_impl<policy_t<Flag>>::reconnect(ph, mode & receiver);
}
template <typename Flag>
void chan_impl<Flag>::disconnect(ipc::handle_t h) {
detail_impl<policy_t<Flag>>::disconnect(h);
}
template <typename Flag>
void chan_impl<Flag>::destroy(ipc::handle_t h) {
detail_impl<policy_t<Flag>>::destroy(h);
}
template <typename Flag>
char const * chan_impl<Flag>::name(ipc::handle_t h) {
auto info = detail_impl<policy_t<Flag>>::info_of(h);
return (info == nullptr) ? nullptr : info->name_.c_str();
}
template <typename Flag>
std::size_t chan_impl<Flag>::recv_count(ipc::handle_t h) {
return detail_impl<policy_t<Flag>>::recv_count(h);
}
template <typename Flag>
bool chan_impl<Flag>::wait_for_recv(ipc::handle_t h, std::size_t r_count, std::uint64_t tm) {
return detail_impl<policy_t<Flag>>::wait_for_recv(h, r_count, tm);
}
template <typename Flag>
bool chan_impl<Flag>::send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
return detail_impl<policy_t<Flag>>::send(h, data, size, tm);
}
template <typename Flag>
buff_t chan_impl<Flag>::recv(ipc::handle_t h, std::uint64_t tm) {
return detail_impl<policy_t<Flag>>::recv(h, tm);
}
template <typename Flag>
bool chan_impl<Flag>::try_send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
return detail_impl<policy_t<Flag>>::try_send(h, data, size, tm);
}
template <typename Flag>
buff_t chan_impl<Flag>::try_recv(ipc::handle_t h) {
return detail_impl<policy_t<Flag>>::try_recv(h);
}
template struct chan_impl<ipc::wr<relat::single, relat::single, trans::unicast >>;
// template struct chan_impl<ipc::wr<relat::single, relat::multi , trans::unicast >>; // TBD
// template struct chan_impl<ipc::wr<relat::multi , relat::multi , trans::unicast >>; // TBD
template struct chan_impl<ipc::wr<relat::single, relat::multi , trans::broadcast>>;
template struct chan_impl<ipc::wr<relat::multi , relat::multi , trans::broadcast>>;
} // namespace ipc

View File

@@ -1,25 +0,0 @@
#pragma once
#include <type_traits>
#include "libipc/def.h"
#include "libipc/prod_cons.h"
#include "libipc/circ/elem_array.h"
namespace ipc {
namespace policy {
template <template <typename, std::size_t...> class Elems, typename Flag>
struct choose;
template <typename Flag>
struct choose<circ::elem_array, Flag> {
using flag_t = Flag;
template <std::size_t DataSize, std::size_t AlignSize>
using elems_t = circ::elem_array<ipc::prod_cons_impl<flag_t>, DataSize, AlignSize>;
};
} // namespace policy
} // namespace ipc

View File

@@ -1,17 +0,0 @@
#include "libipc/pool_alloc.h"
#include "libipc/memory/resource.h"
namespace ipc {
namespace mem {
void* pool_alloc::alloc(std::size_t size) {
return async_pool_alloc::alloc(size);
}
void pool_alloc::free(void* p, std::size_t size) {
async_pool_alloc::free(p, size);
}
} // namespace mem
} // namespace ipc

View File

@@ -1,433 +0,0 @@
#pragma once
#include <atomic>
#include <utility>
#include <cstring>
#include <type_traits>
#include <cstdint>
#include "libipc/def.h"
#include "libipc/platform/detail.h"
#include "libipc/circ/elem_def.h"
#include "libipc/utility/log.h"
#include "libipc/utility/utility.h"
namespace ipc {
////////////////////////////////////////////////////////////////
/// producer-consumer implementation
////////////////////////////////////////////////////////////////
template <typename Flag>
struct prod_cons_impl;
template <>
struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
template <std::size_t DataSize, std::size_t AlignSize>
struct elem_t {
std::aligned_storage_t<DataSize, AlignSize> data_ {};
};
alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
constexpr circ::u2_t cursor() const noexcept {
return 0;
}
template <typename W, typename F, typename E>
bool push(W* /*wrapper*/, F&& f, E* elems) {
auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
return false; // full
}
std::forward<F>(f)(&(elems[cur_wt].data_));
wt_.fetch_add(1, std::memory_order_release);
return true;
}
/**
* In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'.
* So we could just disconnect all connections of receiver, and return false.
*/
template <typename W, typename F, typename E>
bool force_push(W* wrapper, F&&, E*) {
wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
return false;
}
template <typename W, typename F, typename R, typename E>
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
return false; // empty
}
std::forward<F>(f)(&(elems[cur_rd].data_));
std::forward<R>(out)(true);
rd_.fetch_add(1, std::memory_order_release);
return true;
}
};
template <>
struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
: prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
template <typename W, typename F, typename E>
bool force_push(W* wrapper, F&&, E*) {
wrapper->elems()->disconnect_receiver(1);
return false;
}
template <typename W, typename F, typename R,
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
byte_t buff[DS];
for (unsigned k = 0;;) {
auto cur_rd = rd_.load(std::memory_order_relaxed);
if (circ::index_of(cur_rd) ==
circ::index_of(wt_.load(std::memory_order_acquire))) {
return false; // empty
}
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
std::forward<F>(f)(buff);
std::forward<R>(out)(true);
return true;
}
ipc::yield(k);
}
}
};
template <>
struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
: prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {
using flag_t = std::uint64_t;
template <std::size_t DataSize, std::size_t AlignSize>
struct elem_t {
std::aligned_storage_t<DataSize, AlignSize> data_ {};
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
};
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
template <typename W, typename F, typename E>
bool push(W* /*wrapper*/, F&& f, E* elems) {
circ::u2_t cur_ct, nxt_ct;
for (unsigned k = 0;;) {
cur_ct = ct_.load(std::memory_order_relaxed);
if (circ::index_of(nxt_ct = cur_ct + 1) ==
circ::index_of(rd_.load(std::memory_order_acquire))) {
return false; // full
}
if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
break;
}
ipc::yield(k);
}
auto* el = elems + circ::index_of(cur_ct);
std::forward<F>(f)(&(el->data_));
// set flag & try update wt
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
while (1) {
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
if (cur_ct != wt_.load(std::memory_order_relaxed)) {
return true;
}
if ((~cac_ct) != cur_ct) {
return true;
}
if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
return true;
}
wt_.store(nxt_ct, std::memory_order_release);
cur_ct = nxt_ct;
nxt_ct = cur_ct + 1;
el = elems + circ::index_of(cur_ct);
}
return true;
}
template <typename W, typename F, typename E>
bool force_push(W* wrapper, F&&, E*) {
wrapper->elems()->disconnect_receiver(1);
return false;
}
template <typename W, typename F, typename R,
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
byte_t buff[DS];
for (unsigned k = 0;;) {
auto cur_rd = rd_.load(std::memory_order_relaxed);
auto cur_wt = wt_.load(std::memory_order_acquire);
auto id_rd = circ::index_of(cur_rd);
auto id_wt = circ::index_of(cur_wt);
if (id_rd == id_wt) {
auto* el = elems + id_wt;
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
if ((~cac_ct) != cur_wt) {
return false; // empty
}
if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
wt_.store(cur_wt + 1, std::memory_order_release);
}
k = 0;
}
else {
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
std::forward<F>(f)(buff);
std::forward<R>(out)(true);
return true;
}
ipc::yield(k);
}
}
}
};
template <>
struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {
using rc_t = std::uint64_t;
enum : rc_t {
ep_mask = 0x00000000ffffffffull,
ep_incr = 0x0000000100000000ull
};
template <std::size_t DataSize, std::size_t AlignSize>
struct elem_t {
std::aligned_storage_t<DataSize, AlignSize> data_ {};
std::atomic<rc_t> rc_ { 0 }; // read-counter
};
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer
circ::u2_t cursor() const noexcept {
return wt_.load(std::memory_order_acquire);
}
template <typename W, typename F, typename E>
bool push(W* wrapper, F&& f, E* elems) {
E* el;
for (unsigned k = 0;;) {
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
if (cc == 0) return false; // no reader
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
// check all consumers have finished reading this element
auto cur_rc = el->rc_.load(std::memory_order_acquire);
circ::cc_t rem_cc = cur_rc & ep_mask;
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
return false; // has not finished yet
}
// consider rem_cc to be 0 here
if (el->rc_.compare_exchange_weak(
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
break;
}
ipc::yield(k);
}
std::forward<F>(f)(&(el->data_));
wt_.fetch_add(1, std::memory_order_release);
return true;
}
template <typename W, typename F, typename E>
bool force_push(W* wrapper, F&& f, E* elems) {
E* el;
epoch_ += ep_incr;
for (unsigned k = 0;;) {
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
if (cc == 0) return false; // no reader
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
// check all consumers have finished reading this element
auto cur_rc = el->rc_.load(std::memory_order_acquire);
circ::cc_t rem_cc = cur_rc & ep_mask;
if (cc & rem_cc) {
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
if (cc == 0) return false; // no reader
}
// just compare & exchange
if (el->rc_.compare_exchange_weak(
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
break;
}
ipc::yield(k);
}
std::forward<F>(f)(&(el->data_));
wt_.fetch_add(1, std::memory_order_release);
return true;
}
template <typename W, typename F, typename R, typename E>
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
if (cur == cursor()) return false; // acquire
auto* el = elems + circ::index_of(cur++);
std::forward<F>(f)(&(el->data_));
for (unsigned k = 0;;) {
auto cur_rc = el->rc_.load(std::memory_order_acquire);
if ((cur_rc & ep_mask) == 0) {
std::forward<R>(out)(true);
return true;
}
auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
std::forward<R>(out)((nxt_rc & ep_mask) == 0);
return true;
}
ipc::yield(k);
}
}
};
template <>
struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {
using rc_t = std::uint64_t;
using flag_t = std::uint64_t;
enum : rc_t {
rc_mask = 0x00000000ffffffffull,
ep_mask = 0x00ffffffffffffffull,
ep_incr = 0x0100000000000000ull,
ic_mask = 0xff000000ffffffffull,
ic_incr = 0x0000000100000000ull
};
template <std::size_t DataSize, std::size_t AlignSize>
struct elem_t {
std::aligned_storage_t<DataSize, AlignSize> data_ {};
std::atomic<rc_t > rc_ { 0 }; // read-counter
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
};
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };
circ::u2_t cursor() const noexcept {
return ct_.load(std::memory_order_acquire);
}
constexpr static rc_t inc_rc(rc_t rc) noexcept {
return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
}
constexpr static rc_t inc_mask(rc_t rc) noexcept {
return inc_rc(rc) & ~rc_mask;
}
template <typename W, typename F, typename E>
bool push(W* wrapper, F&& f, E* elems) {
E* el;
circ::u2_t cur_ct;
rc_t epoch = epoch_.load(std::memory_order_acquire);
for (unsigned k = 0;;) {
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
if (cc == 0) return false; // no reader
el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
// check all consumers have finished reading this element
auto cur_rc = el->rc_.load(std::memory_order_relaxed);
circ::cc_t rem_cc = cur_rc & rc_mask;
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
return false; // has not finished yet
}
else if (!rem_cc) {
auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
if ((cur_fl != cur_ct) && cur_fl) {
return false; // full
}
}
// consider rem_cc to be 0 here
if (el->rc_.compare_exchange_weak(
cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
break;
}
ipc::yield(k);
}
// only one thread/process would touch here at one time
ct_.store(cur_ct + 1, std::memory_order_release);
std::forward<F>(f)(&(el->data_));
// set flag & try update wt
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
return true;
}
template <typename W, typename F, typename E>
bool force_push(W* wrapper, F&& f, E* elems) {
E* el;
circ::u2_t cur_ct;
rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
for (unsigned k = 0;;) {
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
if (cc == 0) return false; // no reader
el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
// check all consumers have finished reading this element
auto cur_rc = el->rc_.load(std::memory_order_acquire);
circ::cc_t rem_cc = cur_rc & rc_mask;
if (cc & rem_cc) {
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
if (cc == 0) return false; // no reader
}
// just compare & exchange
if (el->rc_.compare_exchange_weak(
cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
if (epoch == epoch_.load(std::memory_order_acquire)) {
break;
}
else if (push(wrapper, std::forward<F>(f), elems)) {
return true;
}
epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
}
ipc::yield(k);
}
// only one thread/process would touch here at one time
ct_.store(cur_ct + 1, std::memory_order_release);
std::forward<F>(f)(&(el->data_));
// set flag & try update wt
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
return true;
}
template <typename W, typename F, typename R, typename E, std::size_t N>
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
auto* el = elems + circ::index_of(cur);
auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
if (cur_fl != ~static_cast<flag_t>(cur)) {
return false; // empty
}
++cur;
std::forward<F>(f)(&(el->data_));
for (unsigned k = 0;;) {
auto cur_rc = el->rc_.load(std::memory_order_acquire);
if ((cur_rc & rc_mask) == 0) {
std::forward<R>(out)(true);
el->f_ct_.store(cur + N - 1, std::memory_order_release);
return true;
}
auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
bool last_one = false;
if ((last_one = (nxt_rc & rc_mask) == 0)) {
el->f_ct_.store(cur + N - 1, std::memory_order_release);
}
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
std::forward<R>(out)(last_one);
return true;
}
ipc::yield(k);
}
}
};
} // namespace ipc

View File

@@ -1,216 +0,0 @@
#pragma once
#include <type_traits>
#include <new>
#include <utility> // [[since C++14]]: std::exchange
#include <algorithm>
#include <atomic>
#include <tuple>
#include <thread>
#include <chrono>
#include <string>
#include <cassert> // assert
#include "libipc/def.h"
#include "libipc/shm.h"
#include "libipc/rw_lock.h"
#include "libipc/utility/log.h"
#include "libipc/platform/detail.h"
#include "libipc/circ/elem_def.h"
namespace ipc {
namespace detail {
class queue_conn {
protected:
circ::cc_t connected_ = 0;
shm::handle elems_h_;
template <typename Elems>
Elems* open(char const * name) {
if (name == nullptr || name[0] == '\0') {
ipc::error("fail open waiter: name is empty!\n");
return nullptr;
}
if (!elems_h_.acquire(name, sizeof(Elems))) {
return nullptr;
}
auto elems = static_cast<Elems*>(elems_h_.get());
if (elems == nullptr) {
ipc::error("fail acquire elems: %s\n", name);
return nullptr;
}
elems->init();
return elems;
}
void close() {
elems_h_.release();
}
public:
queue_conn() = default;
queue_conn(const queue_conn&) = delete;
queue_conn& operator=(const queue_conn&) = delete;
bool connected() const noexcept {
return connected_ != 0;
}
circ::cc_t connected_id() const noexcept {
return connected_;
}
template <typename Elems>
auto connect(Elems* elems) noexcept
/*needs 'optional' here*/
-> std::tuple<bool, bool, decltype(std::declval<Elems>().cursor())> {
if (elems == nullptr) return {};
// if it's already connected, just return
if (connected()) return {connected(), false, 0};
connected_ = elems->connect_receiver();
return {connected(), true, elems->cursor()};
}
template <typename Elems>
bool disconnect(Elems* elems) noexcept {
if (elems == nullptr) return false;
// if it's already disconnected, just return false
if (!connected()) return false;
elems->disconnect_receiver(std::exchange(connected_, 0));
return true;
}
};
template <typename Elems>
class queue_base : public queue_conn {
using base_t = queue_conn;
public:
using elems_t = Elems;
using policy_t = typename elems_t::policy_t;
protected:
elems_t * elems_ = nullptr;
decltype(std::declval<elems_t>().cursor()) cursor_ = 0;
bool sender_flag_ = false;
public:
using base_t::base_t;
queue_base() = default;
explicit queue_base(char const * name)
: queue_base{} {
elems_ = open<elems_t>(name);
}
explicit queue_base(elems_t * elems) noexcept
: queue_base{} {
assert(elems != nullptr);
elems_ = elems;
}
/* not virtual */ ~queue_base() {
base_t::close();
}
elems_t * elems() noexcept { return elems_; }
elems_t const * elems() const noexcept { return elems_; }
bool ready_sending() noexcept {
if (elems_ == nullptr) return false;
return sender_flag_ || (sender_flag_ = elems_->connect_sender());
}
void shut_sending() noexcept {
if (elems_ == nullptr) return;
if (!sender_flag_) return;
elems_->disconnect_sender();
}
bool connect() noexcept {
auto tp = base_t::connect(elems_);
if (std::get<0>(tp) && std::get<1>(tp)) {
cursor_ = std::get<2>(tp);
return true;
}
return std::get<0>(tp);
}
bool disconnect() noexcept {
return base_t::disconnect(elems_);
}
std::size_t conn_count() const noexcept {
return (elems_ == nullptr) ? static_cast<std::size_t>(invalid_value) : elems_->conn_count();
}
bool valid() const noexcept {
return elems_ != nullptr;
}
bool empty() const noexcept {
return !valid() || (cursor_ == elems_->cursor());
}
template <typename T, typename F, typename... P>
bool push(F&& prep, P&&... params) {
if (elems_ == nullptr) return false;
return elems_->push(this, [&](void* p) {
if (prep(p)) ::new (p) T(std::forward<P>(params)...);
});
}
template <typename T, typename F, typename... P>
bool force_push(F&& prep, P&&... params) {
if (elems_ == nullptr) return false;
return elems_->force_push(this, [&](void* p) {
if (prep(p)) ::new (p) T(std::forward<P>(params)...);
});
}
template <typename T, typename F>
bool pop(T& item, F&& out) {
if (elems_ == nullptr) {
return false;
}
return elems_->pop(this, &(this->cursor_), [&item](void* p) {
::new (&item) T(std::move(*static_cast<T*>(p)));
}, std::forward<F>(out));
}
};
} // namespace detail
template <typename T, typename Policy>
class queue final : public detail::queue_base<typename Policy::template elems_t<sizeof(T), alignof(T)>> {
using base_t = detail::queue_base<typename Policy::template elems_t<sizeof(T), alignof(T)>>;
public:
using value_t = T;
using base_t::base_t;
template <typename... P>
bool push(P&&... params) {
return base_t::template push<T>(std::forward<P>(params)...);
}
template <typename... P>
bool force_push(P&&... params) {
return base_t::template force_push<T>(std::forward<P>(params)...);
}
bool pop(T& item) {
return base_t::pop(item, [](bool) {});
}
template <typename F>
bool pop(T& item, F&& out) {
return base_t::pop(item, std::forward<F>(out));
}
};
} // namespace ipc

View File

@@ -1,103 +0,0 @@
#include <string>
#include <utility>
#include "libipc/shm.h"
#include "libipc/utility/pimpl.h"
#include "libipc/memory/resource.h"
namespace ipc {
namespace shm {
class handle::handle_ : public pimpl<handle_> {
public:
shm::id_t id_ = nullptr;
void* m_ = nullptr;
ipc::string n_;
std::size_t s_ = 0;
};
handle::handle()
: p_(p_->make()) {
}
handle::handle(char const * name, std::size_t size, unsigned mode)
: handle() {
acquire(name, size, mode);
}
handle::handle(handle&& rhs)
: handle() {
swap(rhs);
}
handle::~handle() {
release();
p_->clear();
}
void handle::swap(handle& rhs) {
std::swap(p_, rhs.p_);
}
handle& handle::operator=(handle rhs) {
swap(rhs);
return *this;
}
bool handle::valid() const noexcept {
return impl(p_)->m_ != nullptr;
}
std::size_t handle::size() const noexcept {
return impl(p_)->s_;
}
char const * handle::name() const noexcept {
return impl(p_)->n_.c_str();
}
std::int32_t handle::ref() const noexcept {
return shm::get_ref(impl(p_)->id_);
}
void handle::sub_ref() noexcept {
shm::sub_ref(impl(p_)->id_);
}
bool handle::acquire(char const * name, std::size_t size, unsigned mode) {
release();
impl(p_)->id_ = shm::acquire((impl(p_)->n_ = name).c_str(), size, mode);
impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
return valid();
}
std::int32_t handle::release() {
if (impl(p_)->id_ == nullptr) return -1;
return shm::release(detach());
}
void* handle::get() const {
return impl(p_)->m_;
}
void handle::attach(id_t id) {
if (id == nullptr) return;
release();
impl(p_)->id_ = id;
impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
}
id_t handle::detach() {
auto old = impl(p_)->id_;
impl(p_)->id_ = nullptr;
impl(p_)->m_ = nullptr;
impl(p_)->s_ = 0;
impl(p_)->n_.clear();
return old;
}
} // namespace shm
} // namespace ipc

View File

@@ -1,83 +0,0 @@
#pragma once
#include <utility>
#include <string>
#include <mutex>
#include <atomic>
#include "libipc/def.h"
#include "libipc/mutex.h"
#include "libipc/condition.h"
#include "libipc/platform/detail.h"
namespace ipc {
namespace detail {
class waiter {
ipc::sync::condition cond_;
ipc::sync::mutex lock_;
std::atomic<bool> quit_ {false};
public:
static void init();
waiter() = default;
waiter(char const *name) {
open(name);
}
~waiter() {
close();
}
bool valid() const noexcept {
return cond_.valid() && lock_.valid();
}
bool open(char const *name) noexcept {
quit_.store(false, std::memory_order_relaxed);
if (!cond_.open((std::string{"_waiter_cond_"} + name).c_str())) {
return false;
}
if (!lock_.open((std::string{"_waiter_lock_"} + name).c_str())) {
cond_.close();
return false;
}
return valid();
}
void close() noexcept {
cond_.close();
lock_.close();
}
template <typename F>
bool wait_if(F &&pred, std::uint64_t tm = ipc::invalid_value) noexcept {
IPC_UNUSED_ std::lock_guard<ipc::sync::mutex> guard {lock_};
while ([this, &pred] {
return !quit_.load(std::memory_order_relaxed)
&& std::forward<F>(pred)();
}()) {
if (!cond_.wait(lock_, tm)) return false;
}
return true;
}
bool notify() noexcept {
std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
return cond_.notify(lock_);
}
bool broadcast() noexcept {
std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
return cond_.broadcast(lock_);
}
bool quit_waiting() {
quit_.store(true, std::memory_order_release);
return broadcast();
}
};
} // namespace detail
} // namespace ipc

View File

@@ -1,3 +0,0 @@
https://github.com/mutouyun/cpp-ipc
A high-performance inter-process communication library using shared memory on Linux/Windows.
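The two lines above point at the upstream cpp-ipc project whose sources are removed in this diff. For context on the chan_impl<Flag> interface shown earlier, here is a minimal usage sketch. It assumes the upstream ipc::channel wrapper and the ipc::sender / ipc::receiver mode flags from https://github.com/mutouyun/cpp-ipc, none of which appear in this diff, so treat the names as illustrative rather than authoritative.

#include <iostream>
#include <string>
#include "libipc/ipc.h"

int main() {
    // Assumed: ipc::channel is the multi-writer/multi-reader broadcast wrapper
    // over chan_impl<wr<relat::multi, relat::multi, trans::broadcast>>.
    ipc::channel receiver{"demo-chan", ipc::receiver};
    ipc::channel sender{"demo-chan", ipc::sender};

    std::string msg = "hello ipc";
    if (!sender.send(msg.data(), msg.size())) {   // copied into shared memory
        std::cerr << "send failed (no receiver connected?)\n";
        return 1;
    }

    ipc::buff_t buf = receiver.recv();            // blocks until a message arrives
    std::cout.write(static_cast<char const *>(buf.data()),
                    static_cast<std::streamsize>(buf.size())) << '\n';
    return 0;
}

In the removed implementation above, payloads larger than ipc::large_msg_limit take the shared-memory chunk path via acquire_storage(), while smaller ones are split into data_length-sized fragments and reassembled in recv_cache().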

File diff suppressed because it is too large

View File

@@ -1,316 +0,0 @@
// jpgd.h - C++ class for JPEG decompression.
// Public domain, Rich Geldreich <richgel99@gmail.com>
#ifndef JPEG_DECODER_H
#define JPEG_DECODER_H
#include <stdlib.h>
#include <stdio.h>
#include <setjmp.h>
namespace jpgd
{
typedef unsigned char uint8;
typedef signed short int16;
typedef unsigned short uint16;
typedef unsigned int uint;
typedef signed int int32;
// Loads a JPEG image from a memory buffer or a file.
// req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA).
// On return, width/height will be set to the image's dimensions, and actual_comps will be set to either 1 (grayscale) or 3 (RGB).
// Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly.
// Requesting an 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp.
// BEGIN EPIC MOD
//unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps);
unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format);
// END EPIC MOD
unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps);
// Success/failure error codes.
enum jpgd_status
{
JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1,
JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE,
JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS,
JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH,
JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER,
JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS,
JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE,
JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR,
JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM
};
// Input stream interface.
// Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available.
// The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set.
// If the input stream contains data after the JPEG stream's EOI (end of image) marker, it will probably be pulled into the internal buffer.
// Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding.
class jpeg_decoder_stream
{
public:
jpeg_decoder_stream() { }
virtual ~jpeg_decoder_stream() { }
// The read() method is called when the internal input buffer is empty.
// Parameters:
// pBuf - input buffer
// max_bytes_to_read - maximum bytes that can be written to pBuf
// pEOF_flag - set this to true if at end of stream (no more bytes remaining)
// Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0).
// Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full.
virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0;
};
// stdio FILE stream class.
class jpeg_decoder_file_stream : public jpeg_decoder_stream
{
jpeg_decoder_file_stream(const jpeg_decoder_file_stream &);
jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &);
FILE *m_pFile;
bool m_eof_flag, m_error_flag;
public:
jpeg_decoder_file_stream();
virtual ~jpeg_decoder_file_stream();
bool open(const char *Pfilename);
void close();
virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
};
// Memory stream class.
class jpeg_decoder_mem_stream : public jpeg_decoder_stream
{
const uint8 *m_pSrc_data;
uint m_ofs, m_size;
public:
jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { }
jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { }
virtual ~jpeg_decoder_mem_stream() { }
bool open(const uint8 *pSrc_data, uint size);
void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; }
virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
};
// Loads JPEG file from a jpeg_decoder_stream.
unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps);
enum
{
JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4,
JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384
};
typedef int16 jpgd_quant_t;
typedef int16 jpgd_block_t;
class jpeg_decoder
{
public:
// Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc.
// methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline.
jpeg_decoder(jpeg_decoder_stream *pStream);
~jpeg_decoder();
// Call this method after constructing the object to begin decompression.
// If JPGD_SUCCESS is returned you may then call decode() on each scanline.
int begin_decoding();
// Returns the next scan line.
// For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1).
// Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4).
// Returns JPGD_SUCCESS if a scan line has been returned.
// Returns JPGD_DONE if all scan lines have been returned.
// Returns JPGD_FAILED if an error occurred. Call get_error_code() for more info.
int decode(const void** pScan_line, uint* pScan_line_len);
inline jpgd_status get_error_code() const { return m_error_code; }
inline int get_width() const { return m_image_x_size; }
inline int get_height() const { return m_image_y_size; }
inline int get_num_components() const { return m_comps_in_frame; }
inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; }
inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); }
// Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file).
inline int get_total_bytes_read() const { return m_total_bytes_read; }
private:
jpeg_decoder(const jpeg_decoder &);
jpeg_decoder &operator =(const jpeg_decoder &);
typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int);
struct huff_tables
{
bool ac_table;
uint look_up[256];
uint look_up2[256];
uint8 code_size[256];
uint tree[512];
};
struct coeff_buf
{
uint8 *pData;
int block_num_x, block_num_y;
int block_len_x, block_len_y;
int block_size;
};
struct mem_block
{
mem_block *m_pNext;
size_t m_used_count;
size_t m_size;
char m_data[1];
};
jmp_buf m_jmp_state;
mem_block *m_pMem_blocks;
int m_image_x_size;
int m_image_y_size;
jpeg_decoder_stream *m_pStream;
int m_progressive_flag;
uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES];
uint8* m_huff_num[JPGD_MAX_HUFF_TABLES]; // pointer to number of Huffman codes per bit size
uint8* m_huff_val[JPGD_MAX_HUFF_TABLES]; // pointer to Huffman codes per bit size
jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables
int m_scan_type; // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported)
int m_comps_in_frame; // # of components in frame
int m_comp_h_samp[JPGD_MAX_COMPONENTS]; // component's horizontal sampling factor
int m_comp_v_samp[JPGD_MAX_COMPONENTS]; // component's vertical sampling factor
int m_comp_quant[JPGD_MAX_COMPONENTS]; // component's quantization table selector
int m_comp_ident[JPGD_MAX_COMPONENTS]; // component's ID
int m_comp_h_blocks[JPGD_MAX_COMPONENTS];
int m_comp_v_blocks[JPGD_MAX_COMPONENTS];
int m_comps_in_scan; // # of components in scan
int m_comp_list[JPGD_MAX_COMPS_IN_SCAN]; // components in this scan
int m_comp_dc_tab[JPGD_MAX_COMPONENTS]; // component's DC Huffman coding table selector
int m_comp_ac_tab[JPGD_MAX_COMPONENTS]; // component's AC Huffman coding table selector
int m_spectral_start; // spectral selection start
int m_spectral_end; // spectral selection end
int m_successive_low; // successive approximation low
int m_successive_high; // successive approximation high
int m_max_mcu_x_size; // MCU's max. X size in pixels
int m_max_mcu_y_size; // MCU's max. Y size in pixels
int m_blocks_per_mcu;
int m_max_blocks_per_row;
int m_mcus_per_row, m_mcus_per_col;
int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU];
int m_total_lines_left; // total # lines left in image
int m_mcu_lines_left; // total # lines left in this MCU
int m_real_dest_bytes_per_scan_line;
int m_dest_bytes_per_scan_line; // rounded up
int m_dest_bytes_per_pixel; // 4 (RGB) or 1 (Y)
huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES];
coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS];
coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS];
int m_eob_run;
int m_block_y_mcu[JPGD_MAX_COMPONENTS];
uint8* m_pIn_buf_ofs;
int m_in_buf_left;
int m_tem_flag;
bool m_eof_flag;
uint8 m_in_buf_pad_start[128];
uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128];
uint8 m_in_buf_pad_end[128];
int m_bits_left;
uint m_bit_buf;
int m_restart_interval;
int m_restarts_left;
int m_next_restart_num;
int m_max_mcus_per_row;
int m_max_blocks_per_mcu;
int m_expanded_blocks_per_mcu;
int m_expanded_blocks_per_row;
int m_expanded_blocks_per_component;
bool m_freq_domain_chroma_upsample;
int m_max_mcus_per_col;
uint m_last_dc_val[JPGD_MAX_COMPONENTS];
jpgd_block_t* m_pMCU_coefficients;
int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU];
uint8* m_pSample_buf;
int m_crr[256];
int m_cbb[256];
int m_crg[256];
int m_cbg[256];
uint8* m_pScan_line_0;
uint8* m_pScan_line_1;
jpgd_status m_error_code;
bool m_ready_flag;
int m_total_bytes_read;
void free_all_blocks();
// BEGIN EPIC MOD
UE_NORETURN void stop_decoding(jpgd_status status);
// END EPIC MOD
void *alloc(size_t n, bool zero = false);
void word_clear(void *p, uint16 c, uint n);
void prep_in_buffer();
void read_dht_marker();
void read_dqt_marker();
void read_sof_marker();
void skip_variable_marker();
void read_dri_marker();
void read_sos_marker();
int next_marker();
int process_markers();
void locate_soi_marker();
void locate_sof_marker();
int locate_sos_marker();
void init(jpeg_decoder_stream * pStream);
void create_look_ups();
void fix_in_buffer();
void transform_mcu(int mcu_row);
void transform_mcu_expand(int mcu_row);
coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y);
inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y);
void load_next_row();
void decode_next_row();
void make_huff_table(int index, huff_tables *pH);
void check_quant_tables();
void check_huff_tables();
void calc_mcu_block_order();
int init_scan();
void init_frame();
void process_restart();
void decode_scan(pDecode_block_func decode_block_func);
void init_progressive();
void init_sequential();
void decode_start();
void decode_init(jpeg_decoder_stream * pStream);
void H2V2Convert();
void H2V1Convert();
void H1V2Convert();
void H1V1Convert();
void gray_convert();
void expanded_convert();
void find_eoi();
inline uint get_char();
inline uint get_char(bool *pPadding_flag);
inline void stuff_char(uint8 q);
inline uint8 get_octet();
inline uint get_bits(int num_bits);
inline uint get_bits_no_markers(int numbits);
inline int huff_decode(huff_tables *pH);
inline int huff_decode(huff_tables *pH, int& extrabits);
static inline uint8 clamp(int i);
static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
static void decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
};
} // namespace jpgd
#endif // JPEG_DECODER_H
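The comments in this removed header describe two ways to drive the decoder: the one-shot helpers, or the jpeg_decoder class via begin_decoding() followed by decode() on each scanline. A minimal sketch of the one-shot path follows; it relies only on the decompress_jpeg_image_from_file signature declared above, and the std::free() call assumes the upstream jpgd allocator is malloc-based, which this diff does not show.

#include <cstdio>
#include <cstdlib>
#include "jpgd.h"

int main(int argc, char **argv) {
    if (argc < 2) { std::fprintf(stderr, "usage: %s image.jpg\n", argv[0]); return 1; }
    int width = 0, height = 0, actual_comps = 0;
    // Request 3 components (RGB); actual_comps reports what the file really contained.
    unsigned char *pixels = jpgd::decompress_jpeg_image_from_file(
        argv[1], &width, &height, &actual_comps, 3);
    if (pixels == nullptr) { std::fprintf(stderr, "decode failed\n"); return 1; }
    std::printf("%dx%d, %d source component(s)\n", width, height, actual_comps);
    std::free(pixels);  // assumption: the helper allocates with malloc in upstream jpgd
    return 0;
}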

File diff suppressed because it is too large

View File

@@ -1,172 +0,0 @@
// jpge.h - C++ class for JPEG compression.
// Public domain, Rich Geldreich <richgel99@gmail.com>
// Alex Evans: Added RGBA support, linear memory allocator.
#ifndef JPEG_ENCODER_H
#define JPEG_ENCODER_H
#include <stdint.h>
namespace jpge
{
typedef unsigned char uint8;
typedef signed short int16;
typedef signed int int32;
typedef unsigned short uint16;
typedef unsigned int uint32;
typedef unsigned int uint;
// JPEG chroma subsampling factors. Y_ONLY (grayscale images) and H2V2 (color images) are the most common.
enum subsampling_t { Y_ONLY = 0, H1V1 = 1, H2V1 = 2, H2V2 = 3 };
// JPEG compression parameters structure.
struct params
{
inline params() : m_quality(85), m_subsampling(H2V2), m_no_chroma_discrim_flag(false), m_two_pass_flag(false) { }
inline bool check_valid() const
{
if ((m_quality < 1) || (m_quality > 100)) return false;
if ((uint)m_subsampling > (uint)H2V2) return false;
return true;
}
// Quality: 1-100, higher is better. Typical values are around 50-95.
int m_quality;
// m_subsampling:
// 0 = Y (grayscale) only
// 1 = YCbCr, no subsampling (H1V1, YCbCr 1x1x1, 3 blocks per MCU)
// 2 = YCbCr, H2V1 subsampling (YCbCr 2x1x1, 4 blocks per MCU)
// 3 = YCbCr, H2V2 subsampling (YCbCr 4x1x1, 6 blocks per MCU-- very common)
subsampling_t m_subsampling;
// Disables CbCr discrimination - only intended for testing.
// If true, the Y quantization table is also used for the CbCr channels.
bool m_no_chroma_discrim_flag;
bool m_two_pass_flag;
};
// Writes JPEG image to a file.
// num_channels must be 1 (Y) or 3 (RGB), image pitch must be width*num_channels.
bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());
// Writes JPEG image to memory buffer.
// On entry, buf_size is the size of the output buffer pointed at by pBuf, which should be at least ~1024 bytes.
// If return value is true, buf_size will be set to the size of the compressed data.
bool compress_image_to_jpeg_file_in_memory(void *pBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());
// Output stream abstract class - used by the jpeg_encoder class to write to the output stream.
// put_buf() is generally called with len==JPGE_OUT_BUF_SIZE bytes, but for headers it'll be called with smaller amounts.
class output_stream
{
public:
virtual ~output_stream() { };
virtual bool put_buf(const void* Pbuf, int64_t len) = 0;
template<class T> inline bool put_obj(const T& obj) { return put_buf(&obj, sizeof(T)); }
};
// Lower level jpeg_encoder class - useful if more control is needed than the above helper functions.
class jpeg_encoder
{
public:
jpeg_encoder();
~jpeg_encoder();
// Initializes the compressor.
// pStream: The stream object to use for writing compressed data.
// params - Compression parameters structure, defined above.
// width, height - Image dimensions.
// channels - May be 1, or 3. 1 indicates grayscale, 3 indicates RGB source data.
// Returns false on out of memory or if a stream write fails.
bool init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params = params());
const params &get_params() const { return m_params; }
// Deinitializes the compressor, freeing any allocated memory. May be called at any time.
void deinit();
uint get_total_passes() const { return m_params.m_two_pass_flag ? 2 : 1; }
inline uint get_cur_pass() { return m_pass_num; }
// Call this method with each source scanline.
// width * src_channels bytes per scanline is expected (RGB or Y format).
// You must call with NULL after all scanlines are processed to finish compression.
// Returns false on out of memory or if a stream write fails.
bool process_scanline(const void* pScanline);
private:
jpeg_encoder(const jpeg_encoder &);
jpeg_encoder &operator =(const jpeg_encoder &);
typedef int32 sample_array_t;
output_stream *m_pStream;
params m_params;
uint8 m_num_components;
uint8 m_comp_h_samp[3], m_comp_v_samp[3];
int m_image_x, m_image_y, m_image_bpp, m_image_bpl;
int m_image_x_mcu, m_image_y_mcu;
int m_image_bpl_xlt, m_image_bpl_mcu;
int m_mcus_per_row;
int m_mcu_x, m_mcu_y;
uint8 *m_mcu_lines[16];
uint8 m_mcu_y_ofs;
sample_array_t m_sample_array[64];
int16 m_coefficient_array[64];
int32 m_quantization_tables[2][64];
uint m_huff_codes[4][256];
uint8 m_huff_code_sizes[4][256];
uint8 m_huff_bits[4][17];
uint8 m_huff_val[4][256];
uint32 m_huff_count[4][256];
int m_last_dc_val[3];
enum { JPGE_OUT_BUF_SIZE = 2048 };
uint8 m_out_buf[JPGE_OUT_BUF_SIZE];
uint8 *m_pOut_buf;
uint m_out_buf_left;
uint32 m_bit_buffer;
uint m_bits_in;
uint8 m_pass_num;
bool m_all_stream_writes_succeeded;
void optimize_huffman_table(int table_num, int table_len);
void emit_byte(uint8 i);
void emit_word(uint i);
void emit_marker(int marker);
void emit_jfif_app0();
void emit_dqt();
void emit_sof();
void emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag);
void emit_dhts();
void emit_sos();
void emit_markers();
void compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val);
void compute_quant_table(int32 *dst, int16 *src);
void adjust_quant_table(int32 *dst, int32 *src);
void first_pass_init();
bool second_pass_init();
bool jpg_open(int p_x_res, int p_y_res, int src_channels);
void load_block_8_8_grey(int x);
void load_block_8_8(int x, int y, int c);
void load_block_16_8(int x, int c);
void load_block_16_8_8(int x, int c);
void load_quantized_coefficients(int component_num);
void flush_output_buffer();
void put_bits(uint bits, uint len);
void code_coefficients_pass_one(int component_num);
void code_coefficients_pass_two(int component_num);
void code_block(int component_num);
void process_mcu_row();
bool terminate_pass_one();
bool terminate_pass_two();
bool process_end_of_image();
void load_mcu(const void* src);
void clear();
void init();
};
} // namespace jpge
#endif // JPEG_ENCODER_H
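As a quick orientation for the encoder API above, here is a minimal usage sketch (an illustration added for this write-up, not part of jpge itself); the test pattern, the quality value, and the output name "out.jpg" are arbitrary placeholders:

// Compress a synthetic 256x256 RGB buffer to a JPEG file via the jpge helper.
#include "jpge.h"
#include <cstdio>
#include <cstdint>
#include <vector>

int main()
{
    const int64_t width = 256, height = 256, channels = 3;   // RGB, pitch = width * channels
    std::vector<jpge::uint8> image(width * height * channels);
    for (int64_t y = 0; y < height; y++)
        for (int64_t x = 0; x < width; x++)
        {
            jpge::uint8 *p = &image[(y * width + x) * channels];
            p[0] = (jpge::uint8)x;  p[1] = (jpge::uint8)y;  p[2] = 128;  // simple gradient
        }
    jpge::params comp;                 // defaults: quality 85, H2V2 subsampling
    comp.m_quality = 90;
    if (!jpge::compress_image_to_jpeg_file("out.jpg", width, height, channels, image.data(), comp))
    {
        std::fprintf(stderr, "JPEG compression failed\n");
        return 1;
    }
    return 0;
}

For finer control (streaming output, two-pass Huffman optimization), the jpeg_encoder class can be driven directly: call init() with an output_stream, feed each scanline to process_scanline(), and finish by passing NULL, as described in the comments above.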


@@ -1,3 +0,0 @@
jpge.h - C++ class for JPEG compression.
Public domain, Rich Geldreich <richgel99@gmail.com>
Alex Evans: Added RGBA support, linear memory allocator.

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,433 +0,0 @@
#pragma once
#include <atomic>
#include <utility>
#include <cstring>
#include <type_traits>
#include <cstdint>
#include "libipc/def.h"
#include "libipc/platform/detail.h"
#include "libipc/circ/elem_def.h"
#include "libipc/utility/log.h"
#include "libipc/utility/utility.h"
namespace ipc {
////////////////////////////////////////////////////////////////
/// producer-consumer implementation
////////////////////////////////////////////////////////////////
template <typename Flag>
struct prod_cons_impl;
template <>
struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
template <std::size_t DataSize, std::size_t AlignSize>
struct elem_t {
std::aligned_storage_t<DataSize, AlignSize> data_ {};
};
alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
constexpr circ::u2_t cursor() const noexcept {
return 0;
}
template <typename W, typename F, typename E>
bool push(W* /*wrapper*/, F&& f, E* elems) {
auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
return false; // full
}
std::forward<F>(f)(&(elems[cur_wt].data_));
wt_.fetch_add(1, std::memory_order_release);
return true;
}
/**
* In single-single-unicast, 'force_push' is only reached when there is no reader
* or the only reader is dead, so we simply disconnect all receiver connections
* and return false.
*/
template <typename W, typename F, typename E>
bool force_push(W* wrapper, F&&, E*) {
wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
return false;
}
template <typename W, typename F, typename R, typename E>
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
return false; // empty
}
std::forward<F>(f)(&(elems[cur_rd].data_));
std::forward<R>(out)(true);
rd_.fetch_add(1, std::memory_order_release);
return true;
}
};
template <>
struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
: prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
template <typename W, typename F, typename E>
bool force_push(W* wrapper, F&&, E*) {
wrapper->elems()->disconnect_receiver(1);
return false;
}
template <typename W, typename F, typename R,
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
byte_t buff[DS];
for (unsigned k = 0;;) {
auto cur_rd = rd_.load(std::memory_order_relaxed);
if (circ::index_of(cur_rd) ==
circ::index_of(wt_.load(std::memory_order_acquire))) {
return false; // empty
}
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
std::forward<F>(f)(buff);
std::forward<R>(out)(true);
return true;
}
ipc::yield(k);
}
}
};
template <>
struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
: prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {
using flag_t = std::uint64_t;
template <std::size_t DataSize, std::size_t AlignSize>
struct elem_t {
std::aligned_storage_t<DataSize, AlignSize> data_ {};
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
};
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
template <typename W, typename F, typename E>
bool push(W* /*wrapper*/, F&& f, E* elems) {
circ::u2_t cur_ct, nxt_ct;
for (unsigned k = 0;;) {
cur_ct = ct_.load(std::memory_order_relaxed);
if (circ::index_of(nxt_ct = cur_ct + 1) ==
circ::index_of(rd_.load(std::memory_order_acquire))) {
return false; // full
}
if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
break;
}
ipc::yield(k);
}
auto* el = elems + circ::index_of(cur_ct);
std::forward<F>(f)(&(el->data_));
// set flag & try update wt
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
while (1) {
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
if (cur_ct != wt_.load(std::memory_order_relaxed)) {
return true;
}
if ((~cac_ct) != cur_ct) {
return true;
}
if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
return true;
}
wt_.store(nxt_ct, std::memory_order_release);
cur_ct = nxt_ct;
nxt_ct = cur_ct + 1;
el = elems + circ::index_of(cur_ct);
}
return true;
}
template <typename W, typename F, typename E>
bool force_push(W* wrapper, F&&, E*) {
wrapper->elems()->disconnect_receiver(1);
return false;
}
template <typename W, typename F, typename R,
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
byte_t buff[DS];
for (unsigned k = 0;;) {
auto cur_rd = rd_.load(std::memory_order_relaxed);
auto cur_wt = wt_.load(std::memory_order_acquire);
auto id_rd = circ::index_of(cur_rd);
auto id_wt = circ::index_of(cur_wt);
if (id_rd == id_wt) {
auto* el = elems + id_wt;
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
if ((~cac_ct) != cur_wt) {
return false; // empty
}
if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
wt_.store(cur_wt + 1, std::memory_order_release);
}
k = 0;
}
else {
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
std::forward<F>(f)(buff);
std::forward<R>(out)(true);
return true;
}
ipc::yield(k);
}
}
}
};
template <>
struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {
using rc_t = std::uint64_t;
enum : rc_t {
ep_mask = 0x00000000ffffffffull,
ep_incr = 0x0000000100000000ull
};
template <std::size_t DataSize, std::size_t AlignSize>
struct elem_t {
std::aligned_storage_t<DataSize, AlignSize> data_ {};
std::atomic<rc_t> rc_ { 0 }; // read-counter
};
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer
circ::u2_t cursor() const noexcept {
return wt_.load(std::memory_order_acquire);
}
template <typename W, typename F, typename E>
bool push(W* wrapper, F&& f, E* elems) {
E* el;
for (unsigned k = 0;;) {
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
if (cc == 0) return false; // no reader
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
// check all consumers have finished reading this element
auto cur_rc = el->rc_.load(std::memory_order_acquire);
circ::cc_t rem_cc = cur_rc & ep_mask;
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
return false; // has not finished yet
}
// consider rem_cc to be 0 here
if (el->rc_.compare_exchange_weak(
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
break;
}
ipc::yield(k);
}
std::forward<F>(f)(&(el->data_));
wt_.fetch_add(1, std::memory_order_release);
return true;
}
template <typename W, typename F, typename E>
bool force_push(W* wrapper, F&& f, E* elems) {
E* el;
epoch_ += ep_incr;
for (unsigned k = 0;;) {
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
if (cc == 0) return false; // no reader
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
// check all consumers have finished reading this element
auto cur_rc = el->rc_.load(std::memory_order_acquire);
circ::cc_t rem_cc = cur_rc & ep_mask;
if (cc & rem_cc) {
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
if (cc == 0) return false; // no reader
}
// just compare & exchange
if (el->rc_.compare_exchange_weak(
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
break;
}
ipc::yield(k);
}
std::forward<F>(f)(&(el->data_));
wt_.fetch_add(1, std::memory_order_release);
return true;
}
template <typename W, typename F, typename R, typename E>
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
if (cur == cursor()) return false; // acquire
auto* el = elems + circ::index_of(cur++);
std::forward<F>(f)(&(el->data_));
for (unsigned k = 0;;) {
auto cur_rc = el->rc_.load(std::memory_order_acquire);
if ((cur_rc & ep_mask) == 0) {
std::forward<R>(out)(true);
return true;
}
auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
std::forward<R>(out)((nxt_rc & ep_mask) == 0);
return true;
}
ipc::yield(k);
}
}
};
template <>
struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {
using rc_t = std::uint64_t;
using flag_t = std::uint64_t;
enum : rc_t {
rc_mask = 0x00000000ffffffffull,
ep_mask = 0x00ffffffffffffffull,
ep_incr = 0x0100000000000000ull,
ic_mask = 0xff000000ffffffffull,
ic_incr = 0x0000000100000000ull
};
template <std::size_t DataSize, std::size_t AlignSize>
struct elem_t {
std::aligned_storage_t<DataSize, AlignSize> data_ {};
std::atomic<rc_t > rc_ { 0 }; // read-counter
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
};
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };
circ::u2_t cursor() const noexcept {
return ct_.load(std::memory_order_acquire);
}
constexpr static rc_t inc_rc(rc_t rc) noexcept {
return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
}
constexpr static rc_t inc_mask(rc_t rc) noexcept {
return inc_rc(rc) & ~rc_mask;
}
template <typename W, typename F, typename E>
bool push(W* wrapper, F&& f, E* elems) {
E* el;
circ::u2_t cur_ct;
rc_t epoch = epoch_.load(std::memory_order_acquire);
for (unsigned k = 0;;) {
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
if (cc == 0) return false; // no reader
el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
// check all consumers have finished reading this element
auto cur_rc = el->rc_.load(std::memory_order_relaxed);
circ::cc_t rem_cc = cur_rc & rc_mask;
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
return false; // has not finished yet
}
else if (!rem_cc) {
auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
if ((cur_fl != cur_ct) && cur_fl) {
return false; // full
}
}
// consider rem_cc to be 0 here
if (el->rc_.compare_exchange_weak(
cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
break;
}
ipc::yield(k);
}
// only one thread/process would touch here at one time
ct_.store(cur_ct + 1, std::memory_order_release);
std::forward<F>(f)(&(el->data_));
// set flag & try update wt
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
return true;
}
template <typename W, typename F, typename E>
bool force_push(W* wrapper, F&& f, E* elems) {
E* el;
circ::u2_t cur_ct;
rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
for (unsigned k = 0;;) {
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
if (cc == 0) return false; // no reader
el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
// check all consumers have finished reading this element
auto cur_rc = el->rc_.load(std::memory_order_acquire);
circ::cc_t rem_cc = cur_rc & rc_mask;
if (cc & rem_cc) {
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
if (cc == 0) return false; // no reader
}
// just compare & exchange
if (el->rc_.compare_exchange_weak(
cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
if (epoch == epoch_.load(std::memory_order_acquire)) {
break;
}
else if (push(wrapper, std::forward<F>(f), elems)) {
return true;
}
epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
}
ipc::yield(k);
}
// only one thread/process would touch here at one time
ct_.store(cur_ct + 1, std::memory_order_release);
std::forward<F>(f)(&(el->data_));
// set flag & try update wt
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
return true;
}
template <typename W, typename F, typename R, typename E, std::size_t N>
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
auto* el = elems + circ::index_of(cur);
auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
if (cur_fl != ~static_cast<flag_t>(cur)) {
return false; // empty
}
++cur;
std::forward<F>(f)(&(el->data_));
for (unsigned k = 0;;) {
auto cur_rc = el->rc_.load(std::memory_order_acquire);
if ((cur_rc & rc_mask) == 0) {
std::forward<R>(out)(true);
el->f_ct_.store(cur + N - 1, std::memory_order_release);
return true;
}
auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
bool last_one = false;
if ((last_one = (nxt_rc & rc_mask) == 0)) {
el->f_ct_.store(cur + N - 1, std::memory_order_release);
}
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
std::forward<R>(out)(last_one);
return true;
}
ipc::yield(k);
}
}
};
} // namespace ipc
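To make the index protocol in the single-single-unicast specialization above easier to follow, here is a stripped-down, self-contained SPSC ring sketch (written for this note, not part of libipc; the name spsc_ring and the fixed alignment of 64 are illustrative). The producer publishes a slot with a release store on the write index and the consumer pairs it with an acquire load, symmetrically for the read index:

#include <atomic>
#include <cstddef>
#include <cstdint>

template <typename T, std::size_t N>            // N must be a power of two
class spsc_ring {
    static_assert((N & (N - 1)) == 0, "N must be a power of two");
    T buf_[N];                                  // intended for trivially copyable T
    alignas(64) std::atomic<std::uint32_t> rd_ { 0 };   // read index  (consumer side)
    alignas(64) std::atomic<std::uint32_t> wt_ { 0 };   // write index (producer side)
    static std::uint32_t index_of(std::uint32_t i) noexcept {
        return i & static_cast<std::uint32_t>(N - 1);
    }
public:
    bool push(const T &v) {                     // producer thread only
        auto wt = wt_.load(std::memory_order_relaxed);
        if (index_of(wt) == index_of(rd_.load(std::memory_order_acquire) - 1))
            return false;                       // full: next slot not yet consumed
        buf_[index_of(wt)] = v;
        wt_.store(wt + 1, std::memory_order_release);   // publish the element
        return true;
    }
    bool pop(T &out) {                          // consumer thread only
        auto rd = rd_.load(std::memory_order_relaxed);
        if (index_of(rd) == index_of(wt_.load(std::memory_order_acquire)))
            return false;                       // empty: nothing published yet
        out = buf_[index_of(rd)];
        rd_.store(rd + 1, std::memory_order_release);   // hand the slot back
        return true;
    }
};

The multi-consumer and broadcast specializations above layer CAS retry loops, per-slot commit flags (f_ct_) and read counters (rc_) on top of this same basic acquire/release scheme.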


@@ -1,58 +0,0 @@
The goal of reducing sequential computation also forms the foundation of the Extended Neural GPU \citep{extendedngpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as basic building block, computing hidden representations in parallel for all input and output positions. In these models, the number of operations required to relate signals from two arbitrary input or output positions grows in the distance between positions, linearly for ConvS2S and logarithmically for ByteNet. This makes it more difficult to learn dependencies between distant positions \citep{hochreiter2001gradient}. In the Transformer this is reduced to a constant number of operations, albeit at the cost of reduced effective resolution due to averaging attention-weighted positions, an effect we counteract with Multi-Head Attention as described in section~\ref{sec:attention}.
Self-attention, sometimes called intra-attention, is an attention mechanism relating different positions of a single sequence in order to compute a representation of the sequence. Self-attention has been used successfully in a variety of tasks including reading comprehension, abstractive summarization, textual entailment and learning task-independent sentence representations \citep{cheng2016long, decomposableAttnModel, paulus2017deep, lin2017structured}.
End-to-end memory networks are based on a recurrent attention mechanism instead of sequence-aligned recurrence and have been shown to perform well on simple-language question answering and language modeling tasks \citep{sukhbaatar2015}.
To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution.
In the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as \citep{neural_gpu, NalBytenet2017} and \citep{JonasFaceNet2017}.
%\citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), English-to-French (EnFr) and English-to-Romanian language pairs.
%For example,! in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at low computation cost, making it an essential ingredient in competitive recurrent models for machine translation.
%A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture.
%After the seminal models introduced in \citep{sutskever14, bahdanau2014neural, cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation (MT) and language modeling with recurrent endoder-decoder and recurrent language models. Recent effort \citep{shazeer2017outrageously} has successfully combined the power of conditional computation with sequence models to train very large models for MT, pushing SOTA at lower computational cost.
%Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state precludes processing all timesteps at once, instead requiring long sequences of sequential operations. In practice, this results in greatly reduced computational efficiency, as on modern computing hardware, a single operation on a large batch is much faster than a large number of operations on small batches. The problem gets worse at longer sequence lengths. Although sequential computation is not a severe bottleneck at inference time, as autoregressively generating each output requires all previous outputs, the inability to compute scores at all output positions at once hinders us from rapidly training our models over large datasets. Although impressive work such as \citep{Kuchaiev2017Factorization} is able to significantly accelerate the training of LSTMs with factorization tricks, we are still bound by the linear dependence on sequence length.
%If the model could compute hidden states at each time step using only the inputs and outputs, it would be liberated from the dependence on results from previous time steps during training. This line of thought is the foundation of recent efforts such as the Markovian neural GPU \citep{neural_gpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as a building block to compute hidden representations simultaneously for all timesteps, resulting in $O(1)$ sequential time complexity. \citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), Enlish-to-French (EnFr) and English-to-Romanian language pairs.
%A crucial component for accurate sequence prediction is modeling cross-positional communication. For example, in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at a low computation cost, also $O(1)$ sequential time complexity, making it an essential ingredient in recurrent encoder-decoder architectures for MT. A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture.
%Note: Facebook model is no better than RNNs in this regard, since it requires a number of layers proportional to the distance you want to communicate. Bytenet is more promising, since it requires a logarithmnic number of layers (does bytenet have SOTA results)?
%Note: An attention layer can connect a very large number of positions at a low computation cost in O(1) sequential operations. This is why encoder-decoder attention has been so successful in seq-to-seq models so far. It is only natural, then, to also use attention to connect the timesteps of the same sequence.
%Note: I wouldn't say that long sequences are not a problem during inference. It would be great if we could infer with no long sequences. We could just say later on that, while our training graph is constant-depth, our model still requires sequential operations in the decoder part during inference due to the autoregressive nature of the model.
%\begin{table}[h!]
%\caption{Attention models are quite efficient for cross-positional communications when sequence length is smaller than channel depth. $n$ represents the sequence length and $d$ represents the channel depth.}
%\label{tab:op_complexities}
%\begin{center}
%\vspace{-5pt}
%\scalebox{0.75}{
%\begin{tabular}{l|c|c|c}
%\hline \hline
%Layer Type & Receptive & Complexity & Sequential \\
% & Field & & Operations \\
%\hline
%Pointwise Feed-Forward & $1$ & $O(n \cdot d^2)$ & $O(1)$ \\
%\hline
%Recurrent & $n$ & $O(n \cdot d^2)$ & $O(n)$ \\
%\hline
%Convolutional & $r$ & $O(r \cdot n \cdot d^2)$ & $O(1)$ \\
%\hline
%Convolutional (separable) & $r$ & $O(r \cdot n \cdot d + n %\cdot d^2)$ & $O(1)$ \\
%\hline
%Attention & $r$ & $O(r \cdot n \cdot d)$ & $O(1)$ \\
%\hline \hline
%\end{tabular}
%}
%\end{center}
%\end{table}


@@ -1,18 +0,0 @@
Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}.
Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples.
%\marginpar{not sure if the memory constraints are understandable here}
Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in case of the latter. The fundamental constraint of sequential computation, however, remains.
%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away}
Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network.
%\marginpar{not sure if "cross-positional communication" is understandable without explanation}
%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?}
In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.
%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.}
% Just a standard paragraph with citations, rewrite.
%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state encumbers recurrnet models to process multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and haven't been used on datasets that are the of the scale of the web. What's the largest dataset we have ? . Talk about Nividia and possibly other's effors to speed up things, and possibly other efforts that alleviate this, but are still limited by it's comptuational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet,facenet (Also talk about quasi rnn here). Now we talk about attention!! Along with cell architectures such as long short-term meory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do.


@@ -1,155 +0,0 @@
\begin{figure}
\centering
\includegraphics[scale=0.6]{Figures/ModalNet-21}
\caption{The Transformer - model architecture.}
\label{fig:model-arch}
\end{figure}
% Although the primary workhorse of our model is attention,
%Our model maintains the encoder-decoder structure that is common to many so-called sequence-to-sequence models \citep{bahdanau2014neural,sutskever14}. As in all such architectures, the encoder computes a representation of the input sequence, and the decoder consumes these representations along with the output tokens to autoregressively produce the output sequence. Where, traditionally, the encoder and decoder contain stacks of recurrent or convolutional layers, our encoder and decoder stacks are composed of attention layers and position-wise feed-forward layers (Figure~\ref{fig:model-arch}). The following sections describe the gross architecture and these particular components in detail.
Most competitive neural sequence transduction models have an encoder-decoder structure \citep{cho2014learning,bahdanau2014neural,sutskever14}. Here, the encoder maps an input sequence of symbol representations $(x_1, ..., x_n)$ to a sequence of continuous representations $\mathbf{z} = (z_1, ..., z_n)$. Given $\mathbf{z}$, the decoder then generates an output sequence $(y_1,...,y_m)$ of symbols one element at a time. At each step the model is auto-regressive \citep{graves2013generating}, consuming the previously generated symbols as additional input when generating the next.
The Transformer follows this overall architecture using stacked self-attention and point-wise, fully connected layers for both the encoder and decoder, shown in the left and right halves of Figure~\ref{fig:model-arch}, respectively.
\subsection{Encoder and Decoder Stacks}
\paragraph{Encoder:}The encoder is composed of a stack of $N=6$ identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position-wise fully connected feed-forward network. We employ a residual connection \citep{he2016deep} around each of the two sub-layers, followed by layer normalization \cite{layernorm2016}. That is, the output of each sub-layer is $\mathrm{LayerNorm}(x + \mathrm{Sublayer}(x))$, where $\mathrm{Sublayer}(x)$ is the function implemented by the sub-layer itself. To facilitate these residual connections, all sub-layers in the model, as well as the embedding layers, produce outputs of dimension $\dmodel=512$.
\paragraph{Decoder:}The decoder is also composed of a stack of $N=6$ identical layers. In addition to the two sub-layers in each encoder layer, the decoder inserts a third sub-layer, which performs multi-head attention over the output of the encoder stack. Similar to the encoder, we employ residual connections around each of the sub-layers, followed by layer normalization. We also modify the self-attention sub-layer in the decoder stack to prevent positions from attending to subsequent positions. This masking, combined with fact that the output embeddings are offset by one position, ensures that the predictions for position $i$ can depend only on the known outputs at positions less than $i$.
% In our model (Figure~\ref{fig:model-arch}), the encoder and decoder are composed of stacks of alternating self-attention layers (for cross-positional communication) and position-wise feed-forward layers (for in-place computation). In addition, the decoder stack contains encoder-decoder attention layers. Since attention is agnostic to the distances between words, our model requires a "positional encoding" to be added to the encoder and decoder input. The following sections describe all of these components in detail.
\subsection{Attention} \label{sec:attention}
An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is computed as a weighted sum of the values, where the weight assigned to each value is computed by a compatibility function of the query with the corresponding key.
\subsubsection{Scaled Dot-Product Attention} \label{sec:scaled-dot-prod}
% \begin{figure}
% \centering
% \includegraphics[scale=0.6]{Figures/ModalNet-19}
% \caption{Scaled Dot-Product Attention.}
% \label{fig:multi-head-att}
% \end{figure}
We call our particular attention "Scaled Dot-Product Attention" (Figure~\ref{fig:multi-head-att}). The input consists of queries and keys of dimension $d_k$, and values of dimension $d_v$. We compute the dot products of the query with all keys, divide each by $\sqrt{d_k}$, and apply a softmax function to obtain the weights on the values.
In practice, we compute the attention function on a set of queries simultaneously, packed together into a matrix $Q$. The keys and values are also packed together into matrices $K$ and $V$. We compute the matrix of outputs as:
\begin{equation}
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}})V
\end{equation}
The two most commonly used attention functions are additive attention \citep{bahdanau2014neural}, and dot-product (multiplicative) attention. Dot-product attention is identical to our algorithm, except for the scaling factor of $\frac{1}{\sqrt{d_k}}$. Additive attention computes the compatibility function using a feed-forward network with a single hidden layer. While the two are similar in theoretical complexity, dot-product attention is much faster and more space-efficient in practice, since it can be implemented using highly optimized matrix multiplication code.
%We scale the dot products by $1/\sqrt{d_k}$ to limit the magnitude of the dot products, which works well in practice. Otherwise, we found applying the softmax to often result in weights very close to 0 or 1, and hence minuscule gradients.
% Already described in the subsequent section
%When used as part of decoder self-attention, an optional mask function is applied just before the softmax to prevent positions from attending to subsequent positions. This mask simply sets the logits corresponding to all illegal connections (those outside of the lower triangle) to $-\infty$.
%\paragraph{Comparison to Additive Attention: } We choose dot product attention over additive attention \citep{bahdanau2014neural} since it can be computed using highly optimized matrix multiplication code. This optimization is particularly important to us, as we employ many attention layers in our model.
While for small values of $d_k$ the two mechanisms perform similarly, additive attention outperforms dot product attention without scaling for larger values of $d_k$ \citep{DBLP:journals/corr/BritzGLL17}. We suspect that for large values of $d_k$, the dot products grow large in magnitude, pushing the softmax function into regions where it has extremely small gradients \footnote{To illustrate why the dot products get large, assume that the components of $q$ and $k$ are independent random variables with mean $0$ and variance $1$. Then their dot product, $q \cdot k = \sum_{i=1}^{d_k} q_ik_i$, has mean $0$ and variance $d_k$.}. To counteract this effect, we scale the dot products by $\frac{1}{\sqrt{d_k}}$.
%We suspect this to be caused by the dot products growing too large in magnitude to result in useful gradients after applying the softmax function. To counteract this, we scale the dot product by $1/\sqrt{d_k}$.
\subsubsection{Multi-Head Attention} \label{sec:multihead}
\begin{figure}
\begin{minipage}[t]{0.5\textwidth}
\centering
Scaled Dot-Product Attention \\
\vspace{0.5cm}
\includegraphics[scale=0.6]{Figures/ModalNet-19}
\end{minipage}
\begin{minipage}[t]{0.5\textwidth}
\centering
Multi-Head Attention \\
\vspace{0.1cm}
\includegraphics[scale=0.6]{Figures/ModalNet-20}
\end{minipage}
% \centering
\caption{(left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several attention layers running in parallel.}
\label{fig:multi-head-att}
\end{figure}
Instead of performing a single attention function with $\dmodel$-dimensional keys, values and queries, we found it beneficial to linearly project the queries, keys and values $h$ times with different, learned linear projections to $d_k$, $d_k$ and $d_v$ dimensions, respectively.
On each of these projected versions of queries, keys and values we then perform the attention function in parallel, yielding $d_v$-dimensional output values. These are concatenated and once again projected, resulting in the final values, as depicted in Figure~\ref{fig:multi-head-att}.
Multi-head attention allows the model to jointly attend to information from different representation subspaces at different positions. With a single attention head, averaging inhibits this.
\begin{align*}
\mathrm{MultiHead}(Q, K, V) &= \mathrm{Concat}(\mathrm{head_1}, ..., \mathrm{head_h})W^O\\
% \mathrm{where} \mathrm{head_i} &= \mathrm{Attention}(QW_Q_i^{\dmodel \times d_q}, KW_K_i^{\dmodel \times d_k}, VW^V_i^{\dmodel \times d_v})\\
\text{where}~\mathrm{head_i} &= \mathrm{Attention}(QW^Q_i, KW^K_i, VW^V_i)\\
\end{align*}
Where the projections are parameter matrices $W^Q_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^K_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^V_i \in \mathbb{R}^{\dmodel \times d_v}$ and $W^O \in \mathbb{R}^{hd_v \times \dmodel}$.
%find it better (and no more expensive) to have multiple parallel attention layers (each over the full set of positions) with proportionally lower-dimensional keys, values and queries. We call this "Multi-Head Attention" (Figure~\ref{fig:multi-head-att}). The keys, values, and queries for each of these parallel attention layers are computed by learned linear transformations of the inputs to the multi-head attention. We use different linear transformations across different parallel attention layers. The output of the parallel attention layers are concatenated, and then passed through a final learned linear transformation.
In this work we employ $h=8$ parallel attention layers, or heads. For each of these we use $d_k=d_v=\dmodel/h=64$.
Due to the reduced dimension of each head, the total computational cost is similar to that of single-head attention with full dimensionality.
\subsubsection{Applications of Attention in our Model}
The Transformer uses multi-head attention in three different ways:
\begin{itemize}
\item In "encoder-decoder attention" layers, the queries come from the previous decoder layer, and the memory keys and values come from the output of the encoder. This allows every position in the decoder to attend over all positions in the input sequence. This mimics the typical encoder-decoder attention mechanisms in sequence-to-sequence models such as \citep{wu2016google, bahdanau2014neural,JonasFaceNet2017}.
\item The encoder contains self-attention layers. In a self-attention layer all of the keys, values and queries come from the same place, in this case, the output of the previous layer in the encoder. Each position in the encoder can attend to all positions in the previous layer of the encoder.
\item Similarly, self-attention layers in the decoder allow each position in the decoder to attend to all positions in the decoder up to and including that position. We need to prevent leftward information flow in the decoder to preserve the auto-regressive property. We implement this inside of scaled dot-product attention by masking out (setting to $-\infty$) all values in the input of the softmax which correspond to illegal connections. See Figure~\ref{fig:multi-head-att}.
\end{itemize}
\subsection{Position-wise Feed-Forward Networks}\label{sec:ffn}
In addition to attention sub-layers, each of the layers in our encoder and decoder contains a fully connected feed-forward network, which is applied to each position separately and identically. This consists of two linear transformations with a ReLU activation in between.
\begin{equation}
\mathrm{FFN}(x)=\max(0, xW_1 + b_1) W_2 + b_2
\end{equation}
While the linear transformations are the same across different positions, they use different parameters from layer to layer. Another way of describing this is as two convolutions with kernel size 1. The dimensionality of input and output is $\dmodel=512$, and the inner-layer has dimensionality $d_{ff}=2048$.
%In the appendix, we describe how the position-wise feed-forward network can also be seen as a form of attention.
%from Jakob: The number of operations required for the model to relate signals from two arbitrary input or output positions grows in the distance between positions in input or output, linearly for ConvS2S and logarithmically for ByteNet, making it harder to learn dependencies between these positions \citep{hochreiter2001gradient}. In the transformer this is reduced to a constant number of operations, albeit at the cost of effective resolution caused by averaging attention-weighted positions, an effect we aim to counteract with multi-headed attention.
%Figure~\ref{fig:simple-att} presents a simple attention function, $A$, with a single head, that forms the basis of our multi-head attention. $A$ takes a query key vector $\kq$, matrices of memory keys $\km$ and memory values $\vm$ ,and produces a query value vector $\vq$ as
%\begin{equation*} \label{eq:attention}
% A(\kq, \km, \vm) = {\vm}^T (Softmax(\km \kq).
%\end{equation*}
%We linearly transform $\kq,\,\km$, and $\vm$ with learned matrices ${\Wkq \text{,} \, \Wkm}$, and ${\Wvm}$ before calling the attention function, and transform the output query with $\Wvq$ before handing it to the feed forward layer. Each attention layer has it's own set of transformation matrices, which are shared across all query positions. $A$ is applied in parallel for each query position, and is implemented very efficiently as a batch of matrix multiplies. The self-attention and encoder-decoder attention layers use $A$, but with different arguments. For example, in encdoder self-attention, queries in encoder layer $i$ attention to memories in encoder layer $i-1$. To ensure that decoder self-attention layers do not look at future words, we add $- \inf$ to the softmax logits in positions $j+1$ to query length for query position $l$.
%In simple attention, the query value is a weighted combination of the memory values where the attention weights sum to one. Although this function performs well in practice, the constraint on attention weights can restrict the amount of information that flows from memories to queries because the query cannot focus on multiple memory positions at once, which might be desirable when translating long sequences. \marginpar{@usz, could you think of an example of this ?} We remedy this by maintaining multiple attention heads at each query position that attend to all memory positions in parallel, with a different set of parameters per attention head $h$.
%\marginpar{}
\subsection{Embeddings and Softmax}
Similarly to other sequence transduction models, we use learned embeddings to convert the input tokens and output tokens to vectors of dimension $\dmodel$. We also use the usual learned linear transformation and softmax function to convert the decoder output to predicted next-token probabilities. In our model, we share the same weight matrix between the two embedding layers and the pre-softmax linear transformation, similar to \citep{press2016using}. In the embedding layers, we multiply those weights by $\sqrt{\dmodel}$.
\subsection{Positional Encoding}
Since our model contains no recurrence and no convolution, in order for the model to make use of the order of the sequence, we must inject some information about the relative or absolute position of the tokens in the sequence. To this end, we add "positional encodings" to the input embeddings at the bottoms of the encoder and decoder stacks. The positional encodings have the same dimension $\dmodel$ as the embeddings, so that the two can be summed. There are many choices of positional encodings, learned and fixed \citep{JonasFaceNet2017}.
In this work, we use sine and cosine functions of different frequencies:
\begin{align*}
PE_{(pos,2i)} = sin(pos / 10000^{2i/\dmodel}) \\
PE_{(pos,2i+1)} = cos(pos / 10000^{2i/\dmodel})
\end{align*}
where $pos$ is the position and $i$ is the dimension. That is, each dimension of the positional encoding corresponds to a sinusoid. The wavelengths form a geometric progression from $2\pi$ to $10000 \cdot 2\pi$. We chose this function because we hypothesized it would allow the model to easily learn to attend by relative positions, since for any fixed offset $k$, $PE_{pos+k}$ can be represented as a linear function of $PE_{pos}$.
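Concretely, writing $\omega_i = 1/10000^{2i/\dmodel}$ for the frequency of the dimension pair $(2i, 2i+1)$, the angle-addition identities give
\begin{equation*}
\begin{pmatrix} PE_{(pos+k,2i)} \\ PE_{(pos+k,2i+1)} \end{pmatrix}
=
\begin{pmatrix} \cos(\omega_i k) & \sin(\omega_i k) \\ -\sin(\omega_i k) & \cos(\omega_i k) \end{pmatrix}
\begin{pmatrix} PE_{(pos,2i)} \\ PE_{(pos,2i+1)} \end{pmatrix},
\end{equation*}
i.e.\ a rotation whose angle depends only on the offset $k$, not on $pos$.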
We also experimented with using learned positional embeddings \citep{JonasFaceNet2017} instead, and found that the two versions produced nearly identical results (see Table~\ref{tab:variations} row (E)). We chose the sinusoidal version because it may allow the model to extrapolate to sequence lengths longer than the ones encountered during training.


@@ -1,45 +0,0 @@
\pagebreak
\section*{Two Feed-Forward Layers = Attention over Parameters}\label{sec:parameter_attention}
In addition to attention layers, our model contains position-wise feed-forward networks (Section \ref{sec:ffn}), which consist of two linear transformations with a ReLU activation in between. In fact, these networks too can be seen as a form of attention. Compare the formula for such a network with the formula for a simple dot-product attention layer (biases and scaling factors omitted):
\begin{align*}
FFN(x, W_1, W_2) = ReLU(xW_1)W_2 \\
A(q, K, V) = Softmax(qK^T)V
\end{align*}
Based on the similarity of these formulae, the two-layer feed-forward network can be seen as a kind of attention, where the keys and values are the rows of the trainable parameter matrices $W_1$ and $W_2$, and where we use ReLU instead of Softmax in the compatibility function.
%the compatablity function is $compat(q, k_i) = ReLU(q \cdot k_i)$ instead of $Softmax(qK_T)_i$.
Given this similarity, we experimented with replacing the position-wise feed-forward networks with attention layers similar to the ones we use everywhere else in our model. The multi-head-attention-over-parameters sublayer is identical to the multi-head attention described in \ref{sec:multihead}, except that the "keys" and "values" inputs to each attention head are trainable model parameters, as opposed to being linear projections of a previous layer. These parameters are scaled up by a factor of $\sqrt{d_{model}}$ in order to be more similar to activations.
In our first experiment, we replaced each position-wise feed-forward network with a multi-head-attention-over-parameters sublayer with $h_p=8$ heads, key-dimensionality $d_{pk}=64$, and value-dimensionality $d_{pv}=64$, using $n_p=1536$ key-value pairs for each attention head. The sublayer has a total of $2097152$ parameters, including the parameters in the query projection and the output projection. This matches the number of parameters in the position-wise feed-forward network that we replaced. While the theoretical amount of computation is also the same, in practice, the attention version caused the step times to be about 30\% longer.
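For reference, this is exactly the weight count of the position-wise feed-forward network it replaces, ignoring biases: $\dmodel \cdot d_{ff} + d_{ff} \cdot \dmodel = 2 \cdot 512 \cdot 2048 = 2{,}097{,}152$.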
In our second experiment, we used $h_p=8$ heads, and $n_p=512$ key-value pairs for each attention head, again matching the total number of parameters in the base model.
Results for the first experiment were slightly worse than for the base model, and results for the second experiment were slightly better, see Table~\ref{tab:parameter_attention}.
\begin{table}[h]
\caption{Replacing the position-wise feed-forward networks with multihead-attention-over-parameters produces similar results to the base model. All metrics are on the English-to-German translation development set, newstest2013.}
\label{tab:parameter_attention}
\begin{center}
\vspace{-2mm}
%\scalebox{1.0}{
\begin{tabular}{c|cccccc|cccc}
\hline\rule{0pt}{2.0ex}
& \multirow{2}{*}{$\dmodel$} & \multirow{2}{*}{$\dff$} &
\multirow{2}{*}{$h_p$} & \multirow{2}{*}{$d_{pk}$} & \multirow{2}{*}{$d_{pv}$} &
\multirow{2}{*}{$n_p$} &
PPL & BLEU & params & training\\
& & & & & & & (dev) & (dev) & $\times10^6$ & time \\
\hline\rule{0pt}{2.0ex}
base & 512 & 2048 & & & & & 4.92 & 25.8 & 65 & 12 hours\\
\hline\rule{0pt}{2.0ex}
AOP$_1$ & 512 & & 8 & 64 & 64 & 1536 & 4.92& 25.5 & 65 & 16 hours\\
AOP$_2$ & 512 & & 16 & 64 & 64 & 512 & \textbf{4.86} & \textbf{25.9} & 65 & 16 hours \\
\hline
\end{tabular}
%}
\end{center}
\end{table}


@@ -1,8 +0,0 @@
The ancestor of ChatGPT: "Attention is all you need"
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin
The actual abstract is as follows:
The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.
https://arxiv.org/abs/1706.03762


@@ -1,2 +0,0 @@
from stable_baselines3.dqn.dqn import DQN
from stable_baselines3.dqn.policies import CnnPolicy, MlpPolicy


@@ -1,245 +0,0 @@
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common import logger
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.preprocessing import maybe_transpose
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
from stable_baselines3.dqn.policies import DQNPolicy
class DQN(OffPolicyAlgorithm):
"""
Deep Q-Network (DQN)
Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
Default hyperparameters are taken from the nature paper,
except for the optimizer and learning rate that were taken from Stable Baselines defaults.
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
like ``(5, "step")`` or ``(2, "episode")``.
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
Set it to ``-1`` to do as many gradient steps as steps done in the environment
during the rollout.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param target_update_interval: update the target network every ``target_update_interval``
environment steps.
:param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
:param exploration_initial_eps: initial value of random action probability
:param exploration_final_eps: final value of random action probability
:param max_grad_norm: The maximum value for the gradient clipping
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[DQNPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 1e-4,
buffer_size: int = 1000000,
learning_starts: int = 50000,
batch_size: Optional[int] = 32,
tau: float = 1.0,
gamma: float = 0.99,
train_freq: Union[int, Tuple[int, str]] = 4,
gradient_steps: int = 1,
optimize_memory_usage: bool = False,
target_update_interval: int = 10000,
exploration_fraction: float = 0.1,
exploration_initial_eps: float = 1.0,
exploration_final_eps: float = 0.05,
max_grad_norm: float = 10,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(DQN, self).__init__(
policy,
env,
DQNPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
action_noise=None, # No action noise
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
sde_support=False,
optimize_memory_usage=optimize_memory_usage,
supported_action_spaces=(gym.spaces.Discrete,),
)
self.exploration_initial_eps = exploration_initial_eps
self.exploration_final_eps = exploration_final_eps
self.exploration_fraction = exploration_fraction
self.target_update_interval = target_update_interval
self.max_grad_norm = max_grad_norm
# "epsilon" for the epsilon-greedy exploration
self.exploration_rate = 0.0
# Linear schedule will be defined in `_setup_model()`
self.exploration_schedule = None
self.q_net, self.q_net_target = None, None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(DQN, self)._setup_model()
self._create_aliases()
self.exploration_schedule = get_linear_fn(
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
)
def _create_aliases(self) -> None:
self.q_net = self.policy.q_net
self.q_net_target = self.policy.q_net_target
def _on_step(self) -> None:
"""
Update the exploration rate and target network if needed.
This method is called in ``collect_rollouts()`` after each step in the environment.
"""
if self.num_timesteps % self.target_update_interval == 0:
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
logger.record("rollout/exploration rate", self.exploration_rate)
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
# Update learning rate according to schedule
self._update_learning_rate(self.policy.optimizer)
losses = []
for _ in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
with th.no_grad():
# Compute the next Q-values using the target network
next_q_values = self.q_net_target(replay_data.next_observations)
# Follow greedy policy: use the one with the highest value
next_q_values, _ = next_q_values.max(dim=1)
# Avoid potential broadcast issue
next_q_values = next_q_values.reshape(-1, 1)
# 1-step TD target
target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
# Get current Q-values estimates
current_q_values = self.q_net(replay_data.observations)
# Retrieve the q-values for the actions from the replay buffer
current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())
# Compute Huber loss (less sensitive to outliers)
loss = F.smooth_l1_loss(current_q_values, target_q_values)
losses.append(loss.item())
# Optimize the policy
self.policy.optimizer.zero_grad()
loss.backward()
# Clip gradient norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
# Increase update counter
self._n_updates += gradient_steps
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/loss", np.mean(losses))
def predict(
self,
observation: np.ndarray,
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Overrides the base_class predict function to include epsilon-greedy exploration.
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param mask: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next state
(used in recurrent policies)
"""
if not deterministic and np.random.rand() < self.exploration_rate:
if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):
n_batch = observation.shape[0]
action = np.array([self.action_space.sample() for _ in range(n_batch)])
else:
action = np.array(self.action_space.sample())
else:
action, state = self.policy.predict(observation, state, mask, deterministic)
return action, state
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "DQN",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(DQN, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "policy.optimizer"]
return state_dicts, []
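For orientation, a minimal usage sketch of the DQN class above. This is not part of the diff; it assumes a Gym-registered environment such as CartPole-v1, and the hyperparameter values are illustrative only:
```python
# Minimal sketch: train the DQN implementation shown above on CartPole-v1.
# Assumes gym and stable-baselines3 are installed; values are illustrative.
import gym
from stable_baselines3 import DQN

env = gym.make("CartPole-v1")
model = DQN(
    "MlpPolicy",              # registered alias for DQNPolicy
    env,
    learning_rate=1e-4,
    buffer_size=100_000,
    learning_starts=1_000,
    exploration_fraction=0.1,
    exploration_final_eps=0.05,
    verbose=1,
)
model.learn(total_timesteps=20_000)

obs = env.reset()
action, _state = model.predict(obs, deterministic=True)  # greedy action after training
```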


@ -1,237 +0,0 @@
from typing import Any, Dict, List, Optional, Type
import gym
import torch as th
from torch import nn
from stable_baselines3.common.policies import BasePolicy, register_policy
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
from stable_baselines3.common.type_aliases import Schedule
class QNetwork(BasePolicy):
"""
Action-Value (Q-Value) network for DQN
:param observation_space: Observation space
:param action_space: Action space
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor: nn.Module,
features_dim: int,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
):
super(QNetwork, self).__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
if net_arch is None:
net_arch = [64, 64]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.features_extractor = features_extractor
self.features_dim = features_dim
self.normalize_images = normalize_images
action_dim = self.action_space.n # number of actions
q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
self.q_net = nn.Sequential(*q_net)
def forward(self, obs: th.Tensor) -> th.Tensor:
"""
Predict the q-values.
:param obs: Observation
:return: The estimated Q-Value for each action.
"""
return self.q_net(self.extract_features(obs))
def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
q_values = self.forward(observation)
# Greedy action
action = q_values.argmax(dim=1).reshape(-1)
return action
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
data.update(
dict(
net_arch=self.net_arch,
features_dim=self.features_dim,
activation_fn=self.activation_fn,
features_extractor=self.features_extractor,
)
)
return data
class DQNPolicy(BasePolicy):
"""
Policy class with Q-Value Net and target net for DQN
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(DQNPolicy, self).__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
)
if net_arch is None:
if features_extractor_class == FlattenExtractor:
net_arch = [64, 64]
else:
net_arch = []
self.net_arch = net_arch
self.activation_fn = activation_fn
self.normalize_images = normalize_images
self.net_args = {
"observation_space": self.observation_space,
"action_space": self.action_space,
"net_arch": self.net_arch,
"activation_fn": self.activation_fn,
"normalize_images": normalize_images,
}
self.q_net, self.q_net_target = None, None
self._build(lr_schedule)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the network and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self.q_net = self.make_q_net()
self.q_net_target = self.make_q_net()
self.q_net_target.load_state_dict(self.q_net.state_dict())
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def make_q_net(self) -> QNetwork:
# Make sure we always have separate networks for features extractors etc
net_args = self._update_features_extractor(self.net_args, features_extractor=None)
return QNetwork(**net_args).to(self.device)
def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
return self._predict(obs, deterministic=deterministic)
def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
return self.q_net._predict(obs, deterministic=deterministic)
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
data.update(
dict(
net_arch=self.net_args["net_arch"],
activation_fn=self.net_args["activation_fn"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
MlpPolicy = DQNPolicy
class CnnPolicy(DQNPolicy):
"""
Policy class for DQN when using images as input.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param features_extractor_class: Features extractor to use.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(CnnPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
register_policy("MlpPolicy", MlpPolicy)
register_policy("CnnPolicy", CnnPolicy)


@ -1,2 +0,0 @@
GitHub: stable-baselines3
https://github.com/DLR-RM/stable-baselines3


@ -1,27 +0,0 @@
"In practice, we found that a high-entropy initial state is more likely to increase the speed of training.
The entropy is calculated by:
$$H=-\sum_{k=1}^{n_k} p(k) \cdot \log p(k), \quad p(k)=\frac{|A_k|}{|\mathcal{A}|}$$
where $H$ is the entropy, $|A_k|$ is the number of agent nodes in $k$-th cluster, $|\mathcal{A}|$ is the total number of agents.
To ensure the Cooperation Graph initialization has higher entropy,
we will randomly generate multiple initial states,
rank by their entropy and then pick the one with maximum $H$."
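As a worked illustration of the entropy criterion quoted above, the calculation reduces to the snippet below; the cluster sizes are hypothetical and not taken from the paper:
```python
# Worked example of the initialization-entropy criterion quoted above.
# The |A_k| values below are hypothetical cluster sizes.
import math

def assignment_entropy(cluster_sizes):
    total = sum(cluster_sizes)                  # |A|
    probs = [s / total for s in cluster_sizes]  # p(k) = |A_k| / |A|
    return -sum(p * math.log(p) for p in probs if p > 0)

balanced = assignment_entropy([5, 5, 5, 5])   # ~1.386, maximal for 4 clusters
skewed   = assignment_entropy([17, 1, 1, 1])  # ~0.59, lower entropy
# The initializer would keep the balanced assignment (higher H).
```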
```
FROM ubuntu:latest
RUN apt-get update && \
apt-get install -y python3 python3-pip && \
rm -rf /var/lib/apt/lists/*
RUN echo '[global]' > /etc/pip.conf && \
echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
RUN pip3 install gradio requests[socks] mdtex2html
COPY . /gpt
WORKDIR /gpt
CMD ["python3", "main.py"]
```


@ -0,0 +1,63 @@
from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@CatchException
def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
plugin_kwargs 插件模型的参数, 如温度和top_p等, 一般原样传递下去就行
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
"""
history = [] # 清空历史,以免输入溢出
chatbot.append(("这是什么功能?", "交互功能函数模板。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
state = chatbot._cookies.get('plugin_state_0001', None) # 初始化插件状态
if state is None:
chatbot._cookies['lock_plugin'] = 'crazy_functions.交互功能函数模板->交互功能模板函数' # 赋予插件锁定 锁定插件回调路径,当下一次用户提交时,会直接转到该函数
chatbot._cookies['plugin_state_0001'] = 'wait_user_keyword' # 赋予插件状态
chatbot.append(("第一次调用:", "请输入关键词, 我将为您查找相关壁纸, 建议使用英文单词, 插件锁定中,请直接提交即可。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
if state == 'wait_user_keyword':
chatbot._cookies['lock_plugin'] = None # 解除插件锁定,避免遗忘导致死锁
chatbot._cookies['plugin_state_0001'] = None # 解除插件状态,避免遗忘导致死锁
# 解除插件锁定
chatbot.append((f"获取关键词:{txt}", ""))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
page_return = get_image_page_by_keyword(txt)
inputs=inputs_show_user=f"Extract all image urls in this html page, pick the first 5 images and show them with markdown format: \n\n {page_return}"
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=inputs, inputs_show_user=inputs_show_user,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
sys_prompt="When you want to show an image, use markdown format. e.g. ![image_description](image_url). If there are no image url provided, answer 'no image url provided'"
)
chatbot[-1] = [chatbot[-1][0], gpt_say]
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# ---------------------------------------------------------------------------------
def get_image_page_by_keyword(keyword):
import requests
from bs4 import BeautifulSoup
response = requests.get(f'https://wallhaven.cc/search?q={keyword}', timeout=2)
res = "image urls: \n"
for image_element in BeautifulSoup(response.content, 'html.parser').findAll("img"):
try:
res += image_element["data-src"]
res += "\n"
except:
pass
return res


@ -27,8 +27,10 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"):
}
response = requests.post(url, headers=headers, json=data, proxies=proxies)
print(response.content)
try:
image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
except:
raise RuntimeError(response.content.decode())
# 文件保存到本地
r = requests.get(image_url, proxies=proxies)
file_path = 'gpt_log/image_gen/'


@ -1,4 +1,4 @@
from toolbox import CatchException, update_ui
from toolbox import CatchException, update_ui, promote_file_to_downloadzone
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
import re
@ -12,7 +12,7 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
os.makedirs('./gpt_log/', exist_ok=True)
with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
from theme import advanced_css
from theme.theme import advanced_css
f.write(f'<!DOCTYPE html><head><meta charset="utf-8"><title>对话历史</title><style>{advanced_css}</style></head>')
for i, contents in enumerate(chatbot):
for j, content in enumerate(contents):
@ -29,9 +29,8 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
for h in history:
f.write("\n>>>" + h)
f.write('</code>')
res = '对话历史写入:' + os.path.abspath(f'./gpt_log/{file_name}')
print(res)
return res
promote_file_to_downloadzone(f'./gpt_log/{file_name}', rename_file=file_name, chatbot=chatbot)
return '对话历史写入:' + os.path.abspath(f'./gpt_log/{file_name}')
def gen_file_preview(file_name):
try:


@ -14,17 +14,19 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
doc = Document(fp)
file_content = "\n".join([para.text for para in doc.paragraphs])
else:
try:
import win32com.client
word = win32com.client.Dispatch("Word.Application")
word.visible = False
# 打开文件
print('fp', os.getcwd())
doc = word.Documents.Open(os.getcwd() + '/' + fp)
# file_content = doc.Content.Text
doc = word.ActiveDocument
file_content = doc.Range().Text
doc.Close()
word.Quit()
except:
raise RuntimeError('请先将.doc文档转换为.docx文档。')
print(file_content)
# private_upload里面的文件名在解压zip后容易出现乱码rar和7z格式正常故可以只分析文章内容不输入文件名


@ -1,121 +1,107 @@
from toolbox import update_ui
from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str
from toolbox import CatchException, report_execption, write_results_to_file
import re
import unicodedata
fast_debug = False
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import read_and_clean_pdf_text
from .crazy_utils import input_clipping
def is_paragraph_break(match):
"""
根据给定的匹配结果来判断换行符是否表示段落分隔。
如果换行符前为句子结束标志(句号,感叹号,问号),且下一个字符为大写字母,则换行符更有可能表示段落分隔。
也可以根据之前的内容长度来判断段落是否已经足够长。
"""
prev_char, next_char = match.groups()
# 句子结束标志
sentence_endings = ".!?"
# 设定一个最小段落长度阈值
min_paragraph_length = 140
if prev_char in sentence_endings and next_char.isupper() and len(match.string[:match.start(1)]) > min_paragraph_length:
return "\n\n"
else:
return " "
def normalize_text(text):
"""
通过把连字ligatures等文本特殊符号转换为其基本形式来对文本进行归一化处理。
例如,将连字 "fi" 转换为 "f""i"
"""
# 对文本进行归一化处理,分解连字
normalized_text = unicodedata.normalize("NFKD", text)
# 替换其他特殊字符
cleaned_text = re.sub(r'[^\x00-\x7F]+', '', normalized_text)
return cleaned_text
def clean_text(raw_text):
"""
对从 PDF 提取出的原始文本进行清洗和格式化处理。
1. 对原始文本进行归一化处理。
2. 替换跨行的连词
3. 根据 heuristic 规则判断换行符是否是段落分隔,并相应地进行替换
"""
# 对文本进行归一化处理
normalized_text = normalize_text(raw_text)
# 替换跨行的连词
text = re.sub(r'(\w+-\n\w+)', lambda m: m.group(1).replace('-\n', ''), normalized_text)
# 根据前后相邻字符的特点,找到原文本中的换行符
newlines = re.compile(r'(\S)\n(\S)')
# 根据 heuristic 规则,用空格或段落分隔符替换原换行符
final_text = re.sub(newlines, lambda m: m.group(1) + is_paragraph_break(m) + m.group(2), text)
return final_text.strip()
def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
import time, glob, os, fitz
print('begin analysis on:', file_manifest)
for index, fp in enumerate(file_manifest):
with fitz.open(fp) as doc:
file_content = ""
for page in doc:
file_content += page.get_text()
file_content = clean_text(file_content)
print(file_content)
file_write_buffer = []
for file_name in file_manifest:
print('begin analysis on:', file_name)
############################## <第 0 步切割PDF> ##################################
# 递归地切割PDF文件每一块尽量是完整的一个section比如introductionexperiment等必要时再进行切割
# 的长度必须小于 2500 个 Token
file_content, page_one = read_and_clean_pdf_text(file_name) # 尝试按照章节切割PDF
file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
page_one = str(page_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
TOKEN_LIMIT_PER_FRAGMENT = 2500
if not fast_debug:
msg = '正常'
# ** gpt request **
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
from request_llm.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
# 为了更好的效果我们剥离Introduction之后的部分如果有
paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
############################## <第 1 步从摘要中提取高价值信息放到history中> ##################################
final_results = []
final_results.append(paper_meta)
############################## <第 2 步,迭代地历遍整个文章,提取精炼信息> ##################################
i_say_show_user = f'首先你在中文语境下通读整篇论文。'; gpt_say = "[Local Message] 收到。" # 用户提示
chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI
iteration_results = []
last_iteration_result = paper_meta # 初始值是摘要
MAX_WORD_TOTAL = 4096 * 0.7
n_fragment = len(paper_fragments)
if n_fragment >= 20: print('文章极长,不能达到预期效果')
for i in range(n_fragment):
NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment
i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} Chinese characters: {paper_fragments[i]}"
i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} Chinese characters: {paper_fragments[i][:200]}"
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问 i_say_show_user=给用户看的提问
llm_kwargs, chatbot,
history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果
sys_prompt="Extract the main idea of this section with Chinese." # 提示
)
iteration_results.append(gpt_say)
last_iteration_result = gpt_say
############################## <第 3 步整理history提取总结> ##################################
final_results.extend(iteration_results)
final_results.append(f'Please conclude this paper discussed above.')
# This prompt is from https://github.com/kaixindelele/ChatPaper/blob/main/chat_paper.py
NUM_OF_WORD = 1000
i_say = """
1. Mark the title of the paper (with Chinese translation)
2. list all the authors' names (use English)
3. mark the first author's affiliation (output Chinese translation only)
4. mark the keywords of this article (use English)
5. link to the paper, Github code link (if available, fill in Github:None if not)
6. summarize according to the following four points. Be sure to use Chinese answers (proper nouns need to be marked in English)
- (1):What is the research background of this article?
- (2):What are the past methods? What are the problems with them? Is the approach well motivated?
- (3):What is the research methodology proposed in this paper?
- (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?
Follow the format of the output that follows:
1. Title: xxx\n\n
2. Authors: xxx\n\n
3. Affiliation: xxx\n\n
4. Keywords: xxx\n\n
5. Urls: xxx or xxx , xxx \n\n
6. Summary: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
- (4):xxx.\n\n
Be sure to use Chinese answers (proper nouns need to be marked in English), statements as concise and academic as possible,
do not have too much repetitive information, numerical values using the original numbers.
"""
# This prompt is from https://github.com/kaixindelele/ChatPaper/blob/main/chat_paper.py
file_write_buffer.extend(final_results)
i_say, final_results = input_clipping(i_say, final_results, max_token_limit=2000)
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say,
inputs_show_user=i_say_show_user,
llm_kwargs=llm_kwargs,
chatbot=chatbot,
history=[],
sys_prompt="总结文章。"
) # 带超时倒计时
inputs=i_say, inputs_show_user='开始最终总结',
llm_kwargs=llm_kwargs, chatbot=chatbot, history=final_results,
sys_prompt= f"Extract the main idea of this paper with less than {NUM_OF_WORD} Chinese characters"
)
final_results.append(gpt_say)
file_write_buffer.extend([i_say, gpt_say])
############################## <第 4 步设置一个token上限> ##################################
_, final_results = input_clipping("", final_results, max_token_limit=3200)
yield from update_ui(chatbot=chatbot, history=final_results) # 注意这里的历史记录被替代了
chatbot[-1] = (i_say_show_user, gpt_say)
history.append(i_say_show_user); history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
if not fast_debug: time.sleep(2)
all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
chatbot.append((i_say, "[Local Message] waiting gpt response."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
if not fast_debug:
msg = '正常'
# ** gpt request **
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say,
inputs_show_user=i_say,
llm_kwargs=llm_kwargs,
chatbot=chatbot,
history=history,
sys_prompt="总结文章。"
) # 带超时倒计时
chatbot[-1] = (i_say, gpt_say)
history.append(i_say); history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
res = write_results_to_file(history)
chatbot.append(("完成了吗?", res))
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
res = write_results_to_file(file_write_buffer, file_name=gen_time_str())
promote_file_to_downloadzone(res.split('\t')[-1], chatbot=chatbot)
yield from update_ui(chatbot=chatbot, history=final_results) # 刷新界面
@CatchException
@ -151,10 +137,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
return
# 搜索需要处理的文件清单
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
# [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
# [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
# 如果没找到任何文件
if len(file_manifest) == 0:


@ -1,5 +1,5 @@
from toolbox import CatchException, report_execption, write_results_to_file
from toolbox import update_ui
from toolbox import update_ui, promote_file_to_downloadzone
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from .crazy_utils import read_and_clean_pdf_text
@ -147,23 +147,14 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
print('writing html result failed:', trimmed_format_exc())
# 准备文件的下载
import shutil
for pdf_path in generated_conclusion_files:
# 重命名文件
rename_file = f'./gpt_log/翻译-{os.path.basename(pdf_path)}'
if os.path.exists(rename_file):
os.remove(rename_file)
shutil.copyfile(pdf_path, rename_file)
if os.path.exists(pdf_path):
os.remove(pdf_path)
rename_file = f'翻译-{os.path.basename(pdf_path)}'
promote_file_to_downloadzone(pdf_path, rename_file=rename_file, chatbot=chatbot)
for html_path in generated_html_files:
# 重命名文件
rename_file = f'./gpt_log/翻译-{os.path.basename(html_path)}'
if os.path.exists(rename_file):
os.remove(rename_file)
shutil.copyfile(html_path, rename_file)
if os.path.exists(html_path):
os.remove(html_path)
rename_file = f'翻译-{os.path.basename(html_path)}'
promote_file_to_downloadzone(html_path, rename_file=rename_file, chatbot=chatbot)
chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files)))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面


@ -8,7 +8,7 @@ def inspect_dependency(chatbot, history):
import manim
return True
except:
chatbot.append(["导入依赖失败", "使用该模块需要额外依赖,安装方法:```pip install manimgl```"])
chatbot.append(["导入依赖失败", "使用该模块需要额外依赖,安装方法:```pip install manim manimgl```"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return False


@ -13,6 +13,8 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
# 递归地切割PDF文件每一块尽量是完整的一个section比如introductionexperiment等必要时再进行切割
# 的长度必须小于 2500 个 Token
file_content, page_one = read_and_clean_pdf_text(file_name) # 尝试按照章节切割PDF
file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
page_one = str(page_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
TOKEN_LIMIT_PER_FRAGMENT = 2500


@ -0,0 +1,102 @@
from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
import requests
from bs4 import BeautifulSoup
from request_llm.bridge_all import model_info
def bing_search(query, proxies=None):
query = query
url = f"https://cn.bing.com/search?q={query}"
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
response = requests.get(url, headers=headers, proxies=proxies)
soup = BeautifulSoup(response.content, 'html.parser')
results = []
for g in soup.find_all('li', class_='b_algo'):
anchors = g.find_all('a')
if anchors:
link = anchors[0]['href']
if not link.startswith('http'):
continue
title = g.find('h2').text
item = {'title': title, 'link': link}
results.append(item)
for r in results:
print(r['link'])
return results
def scrape_text(url, proxies) -> str:
"""Scrape text from a webpage
Args:
url (str): The URL to scrape text from
Returns:
str: The scraped text
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
'Content-Type': 'text/plain',
}
try:
response = requests.get(url, headers=headers, proxies=proxies, timeout=8)
if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding
except:
return "无法连接到该网页"
soup = BeautifulSoup(response.text, "html.parser")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = "\n".join(chunk for chunk in chunks if chunk)
return text
@CatchException
def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,暂时没有用武之地
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
"""
history = [] # 清空历史,以免输入溢出
chatbot.append((f"请结合互联网信息回答以下问题:{txt}",
"[Local Message] 请注意,您正在调用一个[函数插件]的模板该模板可以实现ChatGPT联网信息综合。该函数面向希望实现更多有趣功能的开发者它可以作为创建新功能函数的模板。您若希望分享新的功能模组请不吝PR"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间我们先及时地做一次界面更新
# ------------- < 第1步爬取搜索引擎的结果 > -------------
from toolbox import get_conf
proxies, = get_conf('proxies')
urls = bing_search(txt, proxies)
history = []
# ------------- < 第2步依次访问网页 > -------------
max_search_result = 8 # 最多收纳多少个网页的结果
for index, url in enumerate(urls[:max_search_result]):
res = scrape_text(url['link'], proxies)
history.extend([f"{index}份搜索结果:", res])
chatbot.append([f"{index}份搜索结果:", res[:500]+"......"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间我们先及时地做一次界面更新
# ------------- < 第3步ChatGPT综合 > -------------
i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}"
i_say, history = input_clipping( # 裁剪输入从最长的条目开始裁剪防止爆token
inputs=i_say,
history=history,
max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']*3//4
)
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=i_say,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。"
)
chatbot[-1] = (i_say, gpt_say)
history.append(i_say);history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新


@ -0,0 +1,131 @@
from toolbox import CatchException, update_ui, gen_time_str
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import input_clipping
prompt = """
I have to achieve some functionalities by calling one of the functions below.
Your job is to find the correct function to use to satisfy my requirement,
and then write python code to call this function with correct parameters.
These are functions you are allowed to choose from:
1.
功能描述: 总结音视频内容
调用函数: ConcludeAudioContent(txt, llm_kwargs)
参数说明:
txt: 音频文件的路径
llm_kwargs: 模型参数, 永远给定None
2.
功能描述: 将每次对话记录写入Markdown格式的文件中
调用函数: WriteMarkdown()
3.
功能描述: 将指定目录下的PDF文件从英文翻译成中文
调用函数: BatchTranslatePDFDocuments_MultiThreaded(txt, llm_kwargs)
参数说明:
txt: PDF文件所在的路径
llm_kwargs: 模型参数, 永远给定None
4.
功能描述: 根据文本使用GPT模型生成相应的图像
调用函数: ImageGeneration(txt, llm_kwargs)
参数说明:
txt: 图像生成所用到的提示文本
llm_kwargs: 模型参数, 永远给定None
5.
功能描述: 对输入的word文档进行摘要生成
调用函数: SummarizingWordDocuments(input_path, output_path)
参数说明:
input_path: 待处理的word文档路径
output_path: 摘要生成后的文档路径
You should always answer with the following format:
----------------
Code:
```
class AutoAcademic(object):
def __init__(self):
self.selected_function = "FILL_CORRECT_FUNCTION_HERE" # e.g., "GenerateImage"
self.txt = "FILL_MAIN_PARAMETER_HERE" # e.g., "荷叶上的蜻蜓"
self.llm_kwargs = None
```
Explanation:
只有GenerateImage和生成图像相关, 因此选择GenerateImage函数。
----------------
Now, this is my requirement:
"""
def get_fn_lib():
return {
"BatchTranslatePDFDocuments_MultiThreaded": ("crazy_functions.批量翻译PDF文档_多线程", "批量翻译PDF文档"),
"SummarizingWordDocuments": ("crazy_functions.总结word文档", "总结word文档"),
"ImageGeneration": ("crazy_functions.图片生成", "图片生成"),
"TranslateMarkdownFromEnglishToChinese": ("crazy_functions.批量Markdown翻译", "Markdown中译英"),
"SummaryAudioVideo": ("crazy_functions.总结音视频", "总结音视频"),
}
def inspect_dependency(chatbot, history):
return True
def eval_code(code, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
import subprocess, sys, os, shutil, importlib
with open('gpt_log/void_terminal_runtime.py', 'w', encoding='utf8') as f:
f.write(code)
try:
AutoAcademic = getattr(importlib.import_module('gpt_log.void_terminal_runtime', 'AutoAcademic'), 'AutoAcademic')
# importlib.reload(AutoAcademic)
auto_dict = AutoAcademic()
selected_function = auto_dict.selected_function
txt = auto_dict.txt
fp, fn = get_fn_lib()[selected_function]
fn_plugin = getattr(importlib.import_module(fp, fn), fn)
yield from fn_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
except:
from toolbox import trimmed_format_exc
chatbot.append(["执行错误", f"\n```\n{trimmed_format_exc()}\n```\n"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
def get_code_block(reply):
import re
pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
matches = re.findall(pattern, reply) # find all code blocks in text
if len(matches) != 1:
raise RuntimeError("GPT is not generating proper code.")
return matches[0].strip('python') # code block
@CatchException
def 终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
"""
txt 输入栏用户输入的文本, 例如需要翻译的一段话, 再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
plugin_kwargs 插件模型的参数, 暂时没有用武之地
chatbot 聊天显示框的句柄, 用于显示给用户
history 聊天历史, 前情提要
system_prompt 给gpt的静默提醒
web_port 当前软件运行的端口号
"""
# 清空历史, 以免输入溢出
history = []
# 基本信息:功能、贡献者
chatbot.append(["函数插件功能?", "根据自然语言执行插件命令, 作者: binary-husky, 插件初始化中 ..."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# # 尝试导入依赖, 如果缺少依赖, 则给出安装建议
# dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history) # 刷新界面
# if not dep_ok: return
# 输入
i_say = prompt + txt
# 开始
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=txt,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
sys_prompt=""
)
# 将代码转为动画
code = get_code_block(gpt_say)
yield from eval_code(code, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
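For illustration only, a model reply that would satisfy `get_code_block` and `eval_code` above might look like the following; the chosen function key comes from `get_fn_lib()`, while the keyword text is hypothetical:
```python
# Hypothetical reply body that get_code_block() would extract and eval_code() would run.
class AutoAcademic(object):
    def __init__(self):
        # must be a key of get_fn_lib(), here the image-generation plugin
        self.selected_function = "ImageGeneration"
        self.txt = "a dragonfly on a lotus leaf"  # main parameter handed to the plugin
        self.llm_kwargs = None
```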


@ -6,7 +6,7 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒
@ -35,7 +35,7 @@ def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history,
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒


@ -0,0 +1,188 @@
from toolbox import update_ui
from toolbox import CatchException, get_conf, markdown_convertion
from crazy_functions.crazy_utils import input_clipping
from request_llm.bridge_all import predict_no_ui_long_connection
import threading, time
import numpy as np
from .live_audio.aliyunASR import AliyunASR
import json
class WatchDog():
def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
self.last_feed = None
self.timeout = timeout
self.bark_fn = bark_fn
self.interval = interval
self.msg = msg
def watch(self):
while True:
if time.time() - self.last_feed > self.timeout:
if len(self.msg) > 0: print(self.msg)
self.bark_fn()
break
time.sleep(self.interval)
def begin_watch(self):
self.last_feed = time.time()
th = threading.Thread(target=self.watch)
th.daemon = True
th.start()
def feed(self):
self.last_feed = time.time()
def chatbot2history(chatbot):
history = []
for c in chatbot:
for q in c:
if q not in ["[请讲话]", "[等待GPT响应]", "[正在等您说完问题]"]:
history.append(q.strip('<div class="markdown-body">').strip('</div>').strip('<p>').strip('</p>'))
return history
class AsyncGptTask():
def __init__(self) -> None:
self.observe_future = []
self.observe_future_chatbot_index = []
def gpt_thread_worker(self, i_say, llm_kwargs, history, sys_prompt, observe_window, index):
try:
MAX_TOKEN_ALLO = 2560
i_say, history = input_clipping(i_say, history, max_token_limit=MAX_TOKEN_ALLO)
gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt,
observe_window=observe_window[index], console_slience=True)
except ConnectionAbortedError as token_exceed_err:
print('至少一个线程任务Token溢出而失败', token_exceed_err)
except Exception as e:
print('至少一个线程任务意外失败', e)
def add_async_gpt_task(self, i_say, chatbot_index, llm_kwargs, history, system_prompt):
self.observe_future.append([""])
self.observe_future_chatbot_index.append(chatbot_index)
cur_index = len(self.observe_future)-1
th_new = threading.Thread(target=self.gpt_thread_worker, args=(i_say, llm_kwargs, history, system_prompt, self.observe_future, cur_index))
th_new.daemon = True
th_new.start()
def update_chatbot(self, chatbot):
for of, ofci in zip(self.observe_future, self.observe_future_chatbot_index):
try:
chatbot[ofci] = list(chatbot[ofci])
chatbot[ofci][1] = markdown_convertion(of[0])
except:
self.observe_future = []
self.observe_future_chatbot_index = []
return chatbot
class InterviewAssistant(AliyunASR):
def __init__(self):
self.capture_interval = 0.5 # second
self.stop = False
self.parsed_text = ""
self.parsed_sentence = ""
self.buffered_sentence = ""
self.event_on_result_chg = threading.Event()
self.event_on_entence_end = threading.Event()
self.event_on_commit_question = threading.Event()
def __del__(self):
self.stop = True
def init(self, chatbot):
# 初始化音频采集线程
self.captured_audio = np.array([])
self.keep_latest_n_second = 10
self.commit_after_pause_n_second = 1.5
self.ready_audio_flagment = None
self.stop = False
self.plugin_wd = WatchDog(timeout=5, bark_fn=self.__del__, msg="程序终止")
self.aut = threading.Thread(target=self.audio_convertion_thread, args=(chatbot._cookies['uuid'],))
self.aut.daemon = True
self.aut.start()
# th2 = threading.Thread(target=self.audio2txt_thread, args=(chatbot._cookies['uuid'],))
# th2.daemon = True
# th2.start()
def no_audio_for_a_while(self):
if len(self.buffered_sentence) < 7: # 如果一句话小于7个字暂不提交
self.commit_wd.begin_watch()
else:
self.event_on_commit_question.set()
def begin(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
# main plugin function
self.init(chatbot)
chatbot.append(["[请讲话]", "[正在等您说完问题]"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
self.plugin_wd.begin_watch()
self.agt = AsyncGptTask()
self.commit_wd = WatchDog(timeout=self.commit_after_pause_n_second, bark_fn=self.no_audio_for_a_while, interval=0.2)
self.commit_wd.begin_watch()
while True:
self.event_on_result_chg.wait(timeout=0.25) # run once every 0.25 second
chatbot = self.agt.update_chatbot(chatbot) # 将子线程的gpt结果写入chatbot
history = chatbot2history(chatbot)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
self.plugin_wd.feed()
if self.event_on_result_chg.is_set():
# update audio decode result
self.event_on_result_chg.clear()
chatbot[-1] = list(chatbot[-1])
chatbot[-1][0] = self.buffered_sentence + self.parsed_text
history = chatbot2history(chatbot)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
self.commit_wd.feed()
if self.event_on_entence_end.is_set():
# called when a sentence has ended
self.event_on_entence_end.clear()
self.parsed_text = self.parsed_sentence
self.buffered_sentence += self.parsed_sentence
if self.event_on_commit_question.is_set():
# called when a question should be commited
self.event_on_commit_question.clear()
if len(self.buffered_sentence) == 0: raise RuntimeError
self.commit_wd.begin_watch()
chatbot[-1] = list(chatbot[-1])
chatbot[-1] = [self.buffered_sentence, "[等待GPT响应]"]
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# add gpt task 创建子线程请求gpt避免线程阻塞
history = chatbot2history(chatbot)
self.agt.add_async_gpt_task(self.buffered_sentence, len(chatbot)-1, llm_kwargs, history, system_prompt)
self.buffered_sentence = ""
chatbot.append(["[请讲话]", "[正在等您说完问题]"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@CatchException
def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
# pip install -U openai-whisper
chatbot.append(["对话助手函数插件:使用时,双手离开鼠标键盘吧", "音频助手, 正在听您讲话(点击“停止”键可终止程序)..."])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# 尝试导入依赖,如果缺少依赖,则给出安装建议
try:
import nls
from scipy import io
except:
chatbot.append(["导入依赖失败", "使用该模块需要额外依赖, 安装方法:```pip install --upgrade pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git```"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
TOKEN, APPKEY = get_conf('ALIYUN_TOKEN', 'ALIYUN_APPKEY')
if TOKEN == "" or APPKEY == "":
chatbot.append(["导入依赖失败", "没有阿里云语音识别APPKEY和TOKEN, 详情见https://help.aliyun.com/document_detail/450255.html"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
ia = InterviewAssistant()
yield from ia.begin(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@ -0,0 +1,28 @@
# encoding: utf-8
# @Time : 2023/4/19
# @Author : Spike
# @Descr :
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@CatchException
def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
if txt:
show_say = txt
prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。'
else:
prompt = history[-1]+"\n分析上述回答,再列出用户可能提出的三个问题。"
show_say = '分析上述回答,再列出用户可能提出的三个问题。'
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=prompt,
inputs_show_user=show_say,
llm_kwargs=llm_kwargs,
chatbot=chatbot,
history=history,
sys_prompt=system_prompt
)
chatbot[-1] = (show_say, gpt_say)
history.extend([show_say, gpt_say])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面


@ -6,7 +6,7 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数如温度和top_p等一般原样传递下去就行
plugin_kwargs 插件模型的参数,暂时没有用武之地
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
chatbot 聊天显示框的句柄,用于显示给用户
history 聊天历史,前情提要
system_prompt 给gpt的静默提醒


@ -6,7 +6,7 @@
version: '3'
services:
gpt_academic_nolocalllms:
image: ghcr.io/binary-husky/gpt_academic_nolocal:master
image: ghcr.io/binary-husky/gpt_academic_nolocal:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal)
environment:
# 请查阅 `config.py` 以查看所有的配置信息
API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
@ -33,7 +33,7 @@ services:
version: '3'
services:
gpt_academic_with_chatglm:
image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master
image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master # (Auto Built by Dockerfile: docs/Dockerfile+ChatGLM)
environment:
# 请查阅 `config.py` 以查看所有的配置信息
API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
@ -63,7 +63,7 @@ services:
version: '3'
services:
gpt_academic_with_rwkv:
image: fuqingxu/gpt_academic:jittorllms # [option 2] 如果需要运行ChatGLM本地模型
image: fuqingxu/gpt_academic:jittorllms
environment:
# 请查阅 `config.py` 以查看所有的配置信息
API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
@ -103,3 +103,30 @@ services:
echo '[jittorllms] 正在从github拉取最新代码...' &&
git --git-dir=request_llm/jittorllms/.git --work-tree=request_llm/jittorllms pull --force &&
python3 -u main.py"
## ===================================================
## 【方案四】 chatgpt + Latex
## ===================================================
version: '3'
services:
gpt_academic_with_latex:
image: ghcr.io/binary-husky/gpt_academic_with_latex:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal+Latex)
environment:
# 请查阅 `config.py` 以查看所有的配置信息
API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
USE_PROXY: ' True '
proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
LLM_MODEL: ' gpt-3.5-turbo '
AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "gpt-4"] '
LOCAL_MODEL_DEVICE: ' cuda '
DEFAULT_WORKER_NUM: ' 10 '
WEB_PORT: ' 12303 '
# 与宿主的网络融合
network_mode: "host"
# 不使用代理网络拉取最新代码
command: >
bash -c "python3 -u main.py"


@ -0,0 +1,27 @@
# 此Dockerfile适用于“无本地模型”的环境构建如果需要使用chatglm等本地模型请参考 docs/Dockerfile+ChatGLM
# - 1 修改 `config.py`
# - 2 构建 docker build -t gpt-academic-nolocal-latex -f docs/Dockerfile+NoLocal+Latex .
# - 3 运行 docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex
FROM fuqingxu/python311_texlive_ctex:latest
# 指定路径
WORKDIR /gpt
ARG useProxyNetwork=''
RUN $useProxyNetwork pip3 install gradio openai numpy arxiv rich -i https://pypi.douban.com/simple/
RUN $useProxyNetwork pip3 install colorama Markdown pygments pymupdf -i https://pypi.douban.com/simple/
# 装载项目文件
COPY . .
# 安装依赖
RUN $useProxyNetwork pip3 install -r requirements.txt -i https://pypi.douban.com/simple/
# 可选步骤,用于预热模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
# 启动
CMD ["python3", "-u", "main.py"]


@ -0,0 +1,25 @@
# 此Dockerfile适用于“无本地模型”的环境构建如果需要使用chatglm等本地模型请参考 docs/Dockerfile+ChatGLM
# - 1 修改 `config.py`
# - 2 构建 docker build -t gpt-academic-nolocal-latex -f docs/Dockerfile+NoLocal+Latex .
# - 3 运行 docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex
FROM fuqingxu/python311_texlive_ctex:latest
# 指定路径
WORKDIR /gpt
RUN pip3 install gradio openai numpy arxiv rich
RUN pip3 install colorama Markdown pygments pymupdf
# 装载项目文件
COPY . .
# 安装依赖
RUN pip3 install -r requirements.txt
# 可选步骤,用于预热模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
# 启动
CMD ["python3", "-u", "main.py"]


@ -2,11 +2,11 @@
>
> Durante l'installazione delle dipendenze, selezionare rigorosamente le **versioni specificate** nel file requirements.txt.
>
> ` pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`
> ` pip install -r requirements.txt`
# <img src="docs/logo.png" width="40" > GPT Ottimizzazione Accademica (GPT Academic)
# <img src="logo.png" width="40" > GPT Ottimizzazione Accademica (GPT Academic)
**Se ti piace questo progetto, ti preghiamo di dargli una stella. Se hai sviluppato scorciatoie accademiche o plugin funzionali più utili, non esitare ad aprire una issue o pull request. Abbiamo anche una README in [Inglese|](docs/README_EN.md)[Giapponese|](docs/README_JP.md)[Coreano|](https://github.com/mldljyh/ko_gpt_academic)[Russo|](docs/README_RS.md)[Francese](docs/README_FR.md) tradotta da questo stesso progetto.
**Se ti piace questo progetto, ti preghiamo di dargli una stella. Se hai sviluppato scorciatoie accademiche o plugin funzionali più utili, non esitare ad aprire una issue o pull request. Abbiamo anche una README in [Inglese|](README_EN.md)[Giapponese|](README_JP.md)[Coreano|](https://github.com/mldljyh/ko_gpt_academic)[Russo|](README_RS.md)[Francese](README_FR.md) tradotta da questo stesso progetto.
Per tradurre questo progetto in qualsiasi lingua con GPT, leggere e eseguire [`multi_language.py`](multi_language.py) (sperimentale).
> **Nota**
@ -17,7 +17,9 @@ Per tradurre questo progetto in qualsiasi lingua con GPT, leggere e eseguire [`m
>
> 3. Questo progetto è compatibile e incoraggia l'utilizzo di grandi modelli di linguaggio di produzione nazionale come chatglm, RWKV, Pangu ecc. Supporta la coesistenza di più api-key e può essere compilato nel file di configurazione come `API_KEY="openai-key1,openai-key2,api2d-key3"`. Per sostituire temporaneamente `API_KEY`, inserire `API_KEY` temporaneo nell'area di input e premere Invio per renderlo effettivo.
<div align="center">Funzione | Descrizione
<div align="center">
Funzione | Descrizione
--- | ---
Correzione immediata | Supporta correzione immediata e ricerca degli errori di grammatica del documento con un solo clic
Traduzione cinese-inglese immediata | Traduzione cinese-inglese immediata con un solo clic
@ -41,6 +43,8 @@ Avvia il tema di gradio [scuro](https://github.com/binary-husky/chatgpt_academic
Supporto per maggiori modelli LLM, supporto API2D | Sentirsi serviti simultaneamente da GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), [Fudan MOSS](https://github.com/OpenLMLab/MOSS) deve essere una grande sensazione, giusto?
Ulteriori modelli LLM supportat,i supporto per l'implementazione di Huggingface | Aggiunta di un'interfaccia Newbing (Nuovo Bing), introdotta la compatibilità con Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) e [PanGu-α](https://openi.org.cn/pangu/)
Ulteriori dimostrazioni di nuove funzionalità (generazione di immagini, ecc.)... | Vedere la fine di questo documento...
</div>
- Nuova interfaccia (modificare l'opzione LAYOUT in `config.py` per passare dal layout a sinistra e a destra al layout superiore e inferiore)
<div align="center">
@ -206,7 +210,9 @@ La difficoltà di scrittura e debug dei plugin del nostro progetto è molto bass
---
# Ultimo aggiornamento
## Nuove funzionalità dinamiche1. Funzionalità di salvataggio della conversazione. Nell'area dei plugin della funzione, fare clic su "Salva la conversazione corrente" per salvare la conversazione corrente come file html leggibile e ripristinabile, inoltre, nell'area dei plugin della funzione (menu a discesa), fare clic su "Carica la cronologia della conversazione archiviata" per ripristinare la conversazione precedente. Suggerimento: fare clic su "Carica la cronologia della conversazione archiviata" senza specificare il file consente di visualizzare la cache degli archivi html di cronologia, fare clic su "Elimina tutti i record di cronologia delle conversazioni locali" per eliminare tutte le cache degli archivi html.
## Nuove funzionalità dinamiche
1. Funzionalità di salvataggio della conversazione. Nell'area dei plugin della funzione, fare clic su "Salva la conversazione corrente" per salvare la conversazione corrente come file html leggibile e ripristinabile, inoltre, nell'area dei plugin della funzione (menu a discesa), fare clic su "Carica la cronologia della conversazione archiviata" per ripristinare la conversazione precedente. Suggerimento: fare clic su "Carica la cronologia della conversazione archiviata" senza specificare il file consente di visualizzare la cache degli archivi html di cronologia, fare clic su "Elimina tutti i record di cronologia delle conversazioni locali" per eliminare tutte le cache degli archivi html.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
</div>


@ -17,7 +17,9 @@ GPT를 이용하여 프로젝트를 임의의 언어로 번역하려면 [`multi_
>
> 3. 이 프로젝트는 국내 언어 모델 chatglm과 RWKV, 판고 등의 시도와 호환 가능합니다. 여러 개의 api-key를 지원하며 설정 파일에 "API_KEY="openai-key1,openai-key2,api2d-key3""와 같이 작성할 수 있습니다. `API_KEY`를 임시로 변경해야하는 경우 입력 영역에 임시 `API_KEY`를 입력 한 후 엔터 키를 누르면 즉시 적용됩니다.
<div align="center">기능 | 설명
<div align="center">
기능 | 설명
--- | ---
원 키워드 | 원 키워드 및 논문 문법 오류를 찾는 기능 지원
한-영 키워드 | 한-영 키워드 지원


@ -2,7 +2,7 @@
>
> Ao instalar as dependências, por favor, selecione rigorosamente as versões **especificadas** no arquivo requirements.txt.
>
> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`
> `pip install -r requirements.txt`
>
# <img src="logo.png" width="40" > Otimização acadêmica GPT (GPT Academic)
@ -18,7 +18,9 @@ Para traduzir este projeto para qualquer idioma com o GPT, leia e execute [`mult
>
> 3. Este projeto é compatível com e incentiva o uso de modelos de linguagem nacionais, como chatglm e RWKV, Pangolin, etc. Suporta a coexistência de várias chaves de API e pode ser preenchido no arquivo de configuração como `API_KEY="openai-key1,openai-key2,api2d-key3"`. Quando precisar alterar temporariamente o `API_KEY`, basta digitar o `API_KEY` temporário na área de entrada e pressionar Enter para que ele entre em vigor.
<div align="center">Funcionalidade | Descrição
<div align="center">
Funcionalidade | Descrição
--- | ---
Um clique de polimento | Suporte a um clique polimento, um clique encontrar erros de gramática no artigo
Tradução chinês-inglês de um clique | Tradução chinês-inglês de um clique
@ -216,7 +218,9 @@ Para mais detalhes, consulte o [Guia do plug-in de função.](https://github.com
---
# Última atualização
## Novas funções dinâmicas.1. Função de salvamento de diálogo. Ao chamar o plug-in de função "Salvar diálogo atual", é possível salvar o diálogo atual em um arquivo html legível e reversível. Além disso, ao chamar o plug-in de função "Carregar arquivo de histórico de diálogo" no menu suspenso da área de plug-in, é possível restaurar uma conversa anterior. Dica: clicar em "Carregar arquivo de histórico de diálogo" sem especificar um arquivo permite visualizar o cache do arquivo html de histórico. Clicar em "Excluir todo o registro de histórico de diálogo local" permite excluir todo o cache de arquivo html.
## Novas funções dinâmicas.
1. Função de salvamento de diálogo. Ao chamar o plug-in de função "Salvar diálogo atual", é possível salvar o diálogo atual em um arquivo html legível e reversível. Além disso, ao chamar o plug-in de função "Carregar arquivo de histórico de diálogo" no menu suspenso da área de plug-in, é possível restaurar uma conversa anterior. Dica: clicar em "Carregar arquivo de histórico de diálogo" sem especificar um arquivo permite visualizar o cache do arquivo html de histórico. Clicar em "Excluir todo o registro de histórico de diálogo local" permite excluir todo o cache de arquivo html.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
</div>


@ -58,6 +58,8 @@
"连接网络回答问题": "ConnectToNetworkToAnswerQuestions",
"联网的ChatGPT": "ChatGPTConnectedToNetwork",
"解析任意code项目": "ParseAnyCodeProject",
"读取知识库作答": "ReadKnowledgeArchiveAnswerQuestions",
"知识库问答": "UpdateKnowledgeArchive",
"同时问询_指定模型": "InquireSimultaneously_SpecifiedModel",
"图片生成": "ImageGeneration",
"test_解析ipynb文件": "Test_ParseIpynbFile",
@ -1665,5 +1667,472 @@
"段音频的主要内容": "The main content of the segment audio is",
"z$ 分别是空间直角坐标系中的三个坐标": "z$, respectively, are the three coordinates in the spatial rectangular coordinate system",
"这个是怎么识别的呢我也不清楚": "I'm not sure how this is recognized",
"从现在起": "From now on"
"从现在起": "From now on",
"连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion",
"联网的ChatGPT_bing版": "OnlineChatGPT_BingEdition",
"Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
"Langchain知识库": "LangchainKnowledgeBase",
"Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison",
"Latex输出PDF结果": "OutputPDFFromLatex",
"Latex翻译中文并重新编译PDF": "TranslateChineseToEnglishInLatexAndRecompilePDF",
"sprint亮靛": "SprintIndigo",
"寻找Latex主文件": "FindLatexMainFile",
"专业词汇声明": "ProfessionalTerminologyDeclaration",
"Latex精细分解与转化": "DecomposeAndConvertLatex",
"编译Latex": "CompileLatex",
"如果您是论文原作者": "If you are the original author of the paper",
"正在编译对比PDF": "Compiling the comparison PDF",
"将 \\include 命令转换为 \\input 命令": "Converting the \\include command to the \\input command",
"取评分最高者返回": "Returning the highest-rated one",
"不要修改!! 高危设置!通过修改此设置": "Do not modify!! High-risk setting! By modifying this setting",
"Tex源文件缺失": "Tex source file is missing!",
"6.25 加入判定latex模板的代码": "Added code to determine the latex template on June 25",
"正在精细切分latex文件": "Finely splitting the latex file",
"获取response失败": "Failed to get response",
"手动指定语言": "Manually specify the language",
"输入arxivID": "Enter arxivID",
"对输入的word文档进行摘要生成": "Generate a summary of the input word document",
"将指定目录下的PDF文件从英文翻译成中文": "Translate PDF files from English to Chinese in the specified directory",
"如果分析错误": "If the analysis is incorrect",
"尝试第": "Try the",
"用户填3": "User fills in 3",
"请在此处追加更细致的矫错指令": "Please append more detailed correction instructions here",
"为了防止大语言模型的意外谬误产生扩散影响": "To prevent the accidental spread of errors in large language models",
"前面是中文冒号": "The colon before is in Chinese",
"内含已经翻译的Tex文档": "Contains a Tex document that has been translated",
"成功啦": "Success!",
"刷新页面即可以退出UpdateKnowledgeArchive模式": "Refresh the page to exit UpdateKnowledgeArchive mode",
"或者不在环境变量PATH中": "Or not in the environment variable PATH",
"--读取文件": "--Read the file",
"才能继续下面的步骤": "To continue with the next steps",
"代理数据解析失败": "Proxy data parsing failed",
"详见项目主README.md": "See the main README.md of the project for details",
"临时存储用于调试": "Temporarily stored for debugging",
"屏蔽空行和太短的句子": "Filter out empty lines and sentences that are too short",
"gpt 多线程请求": "GPT multi-threaded request",
"编译已经开始": "Compilation has started",
"无法找到一个主Tex文件": "Cannot find a main Tex file",
"修复括号": "Fix parentheses",
"请您不要删除或修改这行警告": "Please do not delete or modify this warning",
"请登录OpenAI查看详情 https": "Please log in to OpenAI to view details at https",
"调用函数": "Call a function",
"请查看终端的输出或耐心等待": "Please check the output in the terminal or wait patiently",
"LatexEnglishCorrection+高亮修正位置": "Latex English correction + highlight correction position",
"行": "line",
"Newbing 请求失败": "Newbing request failed",
"转化PDF编译是否成功": "Check if the conversion to PDF and compilation were successful",
"建议更换代理协议": "Recommend changing the proxy protocol",
"========================================= 插件主程序1 =====================================================": "========================================= Plugin Main Program 1 =====================================================",
"终端": "terminal",
"请先上传文件素材": "Please upload file materials first",
"前面是中文逗号": "There is a Chinese comma in front",
"请尝试把以下指令复制到高级参数区": "Please try copying the following instructions to the advanced parameters section",
"翻译-": "Translation -",
"请耐心等待": "Please be patient",
"将前后断行符脱离": "Remove line breaks before and after",
"json等": "JSON, etc.",
"生成中文PDF": "Generate Chinese PDF",
"用红色标注处保留区": "Use red color to highlight the reserved area",
"对比PDF编译是否成功": "Compare if the PDF compilation was successful",
"回答完问题后": "After answering the question",
"其他操作系统表现未知": "Unknown performance on other operating systems",
"-构建知识库": "Build knowledge base",
"还原原文": "Restore original text",
"或者重启之后再度尝试": "Or try again after restarting",
"免费": "Free",
"仅在Windows系统进行了测试": "Tested only on Windows system",
"欢迎加REAME中的QQ联系开发者": "Feel free to contact the developer via QQ in REAME",
"当前知识库内的有效文件": "Valid files in the current knowledge base",
"您可以到Github Issue区": "You can go to the Github Issue area",
"刷新Gradio前端界面": "Refresh the Gradio frontend interface",
"吸收title与作者以上的部分": "Include the title and the above part of the author",
"给出一些判定模板文档的词作为扣分项": "Provide some words in the template document as deduction items",
"--读取参数": "-- Read parameters",
"然后进行问答": "And then perform question-answering",
"根据自然语言执行插件命令": "Execute plugin commands based on natural language",
"*{\\scriptsize\\textbf{警告": "*{\\scriptsize\\textbf{Warning",
"但请查收结果": "But please check the results",
"翻译内容可靠性无保障": "No guarantee of translation accuracy",
"寻找主文件": "Find the main file",
"消耗时间的函数": "Time-consuming function",
"当前语言模型温度设定": "Current language model temperature setting",
"这需要一段时间计算": "This requires some time to calculate",
"为啥chatgpt会把cite里面的逗号换成中文逗号呀": "Why does ChatGPT change commas inside 'cite' to Chinese commas?",
"发现已经存在翻译好的PDF文档": "Found an already translated PDF document",
"待提取的知识库名称id": "Knowledge base name ID to be extracted",
"文本碎片重组为完整的tex片段": "Reassemble text fragments into complete tex fragments",
"注意事项": "Notes",
"参数说明": "Parameter description",
"或代理节点": "Or proxy node",
"构建知识库": "Building knowledge base",
"报错信息如下. 如果是与网络相关的问题": "Error message as follows. If it is related to network issues",
"功能描述": "Function description",
"禁止移除或修改此警告": "Removal or modification of this warning is prohibited",
"Arixv翻译": "Arixv translation",
"读取优先级": "Read priority",
"包含documentclass关键字": "Contains the documentclass keyword",
"根据文本使用GPT模型生成相应的图像": "Generate corresponding images using GPT model based on the text",
"图像生成所用到的提示文本": "Prompt text used for image generation",
"Your account is not active. OpenAI以账户失效为由": "Your account is not active. OpenAI states that it is due to account expiration",
"快捷的调试函数": "Convenient debugging function",
"在多Tex文档中": "In multiple Tex documents",
"因此选择GenerateImage函数": "Therefore, choose the GenerateImage function",
"当前工作路径为": "The current working directory is",
"实际得到格式": "Obtained format in reality",
"这段代码定义了一个名为TempProxy的空上下文管理器": "This code defines an empty context manager named TempProxy",
"吸收其他杂项": "Absorb other miscellaneous items",
"请输入要翻译成哪种语言": "Please enter which language to translate into",
"的单词": "of the word",
"正在尝试自动安装": "Attempting automatic installation",
"如果有必要": "If necessary",
"开始下载": "Start downloading",
"项目Github地址 \\url{https": "Project GitHub address \\url{https",
"将根据报错信息修正tex源文件并重试": "The Tex source file will be corrected and retried based on the error message",
"发送至azure openai api": "Send to Azure OpenAI API",
"吸收匿名公式": "Absorb anonymous formulas",
"用该压缩包+ConversationHistoryArchive进行反馈": "Provide feedback using the compressed package + ConversationHistoryArchive",
"需要特殊依赖": "Requires special dependencies",
"还原部分原文": "Restore part of the original text",
"构建完成": "Build completed",
"解析arxiv网址失败": "Failed to parse arXiv URL",
"输入问题后点击该插件": "Click the plugin after entering the question",
"请求子进程": "Requesting subprocess",
"请务必用 pip install -r requirements.txt 指令安装依赖": "Please make sure to install the dependencies using the 'pip install -r requirements.txt' command",
"如果程序停顿5分钟以上": "If the program pauses for more than 5 minutes",
"转化PDF编译已经成功": "Conversion to PDF compilation was successful",
"虽然PDF生成失败了": "Although PDF generation failed",
"分析上述回答": "Analyze the above answer",
"吸收在42行以内的begin-end组合": "Absorb the begin-end combination within 42 lines",
"推荐http": "Recommend http",
"Latex没有安装": "Latex is not installed",
"用latex编译为PDF对修正处做高亮": "Compile to PDF using LaTeX and highlight the corrections",
"reverse 操作必须放在最后": "'reverse' operation must be placed at the end",
"AZURE OPENAI API拒绝了请求": "AZURE OPENAI API rejected the request",
"该项目的Latex主文件是": "The main LaTeX file of this project is",
"You are associated with a deactivated account. OpenAI以账户失效为由": "You are associated with a deactivated account. OpenAI considers it as an account expiration",
"它*必须*被包含在AVAIL_LLM_MODELS列表中": "It *must* be included in the AVAIL_LLM_MODELS list",
"未知指令": "Unknown command",
"尝试执行Latex指令失败": "Failed to execute the LaTeX command",
"摘要生成后的文档路径": "Path of the document after summary generation",
"GPT结果已输出": "GPT result has been outputted",
"使用Newbing": "Using Newbing",
"其他模型转化效果未知": "Unknown conversion effect of other models",
"P.S. 但愿没人把latex模板放在里面传进来": "P.S. Hopefully, no one passes a LaTeX template in it",
"定位主Latex文件": "Locate the main LaTeX file",
"后面是英文冒号": "English colon follows",
"文档越长耗时越长": "The longer the document, the longer it takes.",
"压缩包": "Compressed file",
"但通常不会出现在正文": "But usually does not appear in the body.",
"正在预热文本向量化模组": "Preheating text vectorization module",
"5刀": "5 dollars",
"提问吧! 但注意": "Ask questions! But be careful",
"发送至AZURE OPENAI API": "Send to AZURE OPENAI API",
"请仔细鉴别并以原文为准": "Please carefully verify and refer to the original text",
"如果需要使用AZURE 详情请见额外文档 docs\\use_azure.md": "If you need to use AZURE, please refer to the additional document docs\\use_azure.md for details",
"使用正则表达式查找半行注释": "Use regular expressions to find inline comments",
"只有第二步成功": "Only the second step is successful",
"P.S. 顺便把CTEX塞进去以支持中文": "P.S. By the way, include CTEX to support Chinese",
"安装方法https": "Installation method: https",
"则跳过GPT请求环节": "Then skip the GPT request process",
"请切换至“UpdateKnowledgeArchive”插件进行知识库访问": "Please switch to the 'UpdateKnowledgeArchive' plugin for knowledge base access",
"=================================== 工具函数 ===============================================": "=================================== Utility functions ===============================================",
"填入azure openai api的密钥": "Fill in the Azure OpenAI API key",
"上传Latex压缩包": "Upload LaTeX compressed file",
"远程云服务器部署": "Deploy to remote cloud server",
"用黑色标注转换区": "Use black color to annotate the conversion area",
"音频文件的路径": "Path to the audio file",
"必须包含documentclass": "Must include documentclass",
"再列出用户可能提出的三个问题": "List three more questions that the user might ask",
"根据需要切换prompt": "Switch the prompt as needed",
"将文件复制一份到下载区": "Make a copy of the file in the download area",
"次编译": "Second compilation",
"Latex文件融合完成": "LaTeX file merging completed",
"返回": "Return",
"后面是英文逗号": "Comma after this",
"对不同latex源文件扣分": "Deduct points for different LaTeX source files",
"失败啦": "Failed",
"编译BibTex": "Compile BibTeX",
"Linux下必须使用Docker安装": "Must install using Docker on Linux",
"报错信息": "Error message",
"删除或修改歧义文件": "Delete or modify ambiguous files",
"-预热文本向量化模组": "- Preheating text vectorization module",
"将每次对话记录写入Markdown格式的文件中": "Write each conversation record into a file in Markdown format",
"其他类型文献转化效果未知": "Unknown conversion effect for other types of literature",
"获取线程锁": "Acquire thread lock",
"使用英文": "Use English",
"如果存在调试缓存文件": "If there is a debug cache file",
"您需要首先调用构建知识库": "You need to call the knowledge base building first",
"原始PDF编译是否成功": "Whether the original PDF compilation is successful",
"生成 azure openai api请求": "Generate Azure OpenAI API requests",
"正在编译PDF": "Compiling PDF",
"仅调试": "Debug only",
"========================================= 插件主程序2 =====================================================": "========================================= Plugin Main Program 2 =====================================================",
"多线程翻译开始": "Multithreaded translation begins",
"出问题了": "There is a problem",
"版权归原文作者所有": "Copyright belongs to the original author",
"当前大语言模型": "Current large language model",
"目前对机器学习类文献转化效果最好": "Currently, the best conversion effect for machine learning literature",
"这个paper有个input命令文件名大小写错误": "This paper has an input command with a filename case error!",
"期望格式例如": "Expected format, for example",
"解决部分词汇翻译不准确的问题": "Resolve the issue of inaccurate translation for some terms",
"待注入的知识库名称id": "Name/ID of the knowledge base to be injected",
"精细切分latex文件": "Fine-grained segmentation of LaTeX files",
"永远给定None": "Always given None",
"work_folder = Latex预处理": "work_folder = LaTeX preprocessing",
"请直接去该路径下取回翻译结果": "Please directly go to the path to retrieve the translation results",
"寻找主tex文件": "Finding the main .tex file",
"模型参数": "Model parameters",
"返回找到的第一个": "Return the first one found",
"编译转化后的PDF": "Compile the converted PDF",
"\\SEAFILE_LOCALŅ03047\\我的资料库\\music\\Akie秋绘-未来轮廓.mp3": "\\SEAFILE_LOCALŅ03047\\My Library\\music\\Akie秋绘-未来轮廓.mp3",
"拆分过长的latex片段": "Splitting overly long LaTeX fragments",
"没有找到任何可读取文件": "No readable files found",
"暗色模式 / 亮色模式": "Dark mode / Light mode",
"检测到arxiv文档连接": "Detected arXiv document link",
"此插件Windows支持最佳": "This plugin has best support for Windows",
"本地论文翻译": "Local paper translation",
"输出html调试文件": "Output HTML debugging file",
"以下所有配置也都支持利用环境变量覆写": "All the following configurations can also be overridden using environment variables",
"PDF文件所在的路径": "Path of the PDF file",
"也是可读的": "It is also readable",
"将消耗较长时间下载中文向量化模型": "Downloading Chinese vectorization model will take a long time",
"环境变量配置格式见docker-compose.yml": "See docker-compose.yml for the format of environment variable configuration",
"编译文献交叉引用": "Compile bibliographic cross-references",
"默认为default": "Default is 'default'",
"或者使用此插件继续上传更多文件": "Or use this plugin to continue uploading more files",
"该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成": "This PDF is generated by the GPT-Academic open-source project using a large language model + LaTeX translation plugin",
"使用latexdiff生成论文转化前后对比": "Use latexdiff to generate before and after comparison of paper transformation",
"正在编译PDF文档": "Compiling PDF document",
"读取config.py文件中关于AZURE OPENAI API的信息": "Read the information about AZURE OPENAI API from the config.py file",
"配置教程&视频教程": "Configuration tutorial & video tutorial",
"临时地启动代理网络": "Temporarily start proxy network",
"临时地激活代理网络": "Temporarily activate proxy network",
"功能尚不稳定": "Functionality is unstable",
"默认为Chinese": "Default is Chinese",
"请查收结果": "Please check the results",
"将 chatglm 直接对齐到 chatglm2": "Align chatglm directly to chatglm2",
"中读取数据构建知识库": "Build a knowledge base by reading data in",
"用于给一小段代码上代理": "Used to proxy a small piece of code",
"分析结果": "Analysis results",
"依赖不足": "Insufficient dependencies",
"Markdown翻译": "Markdown translation",
"除非您是论文的原作者": "Unless you are the original author of the paper",
"test_LangchainKnowledgeBase读取": "test_LangchainKnowledgeBase read",
"将多文件tex工程融合为一个巨型tex": "Merge multiple tex projects into one giant tex",
"吸收iffalse注释": "Absorb iffalser comments",
"您接下来不能再使用其他插件了": "You can no longer use other plugins next",
"正在构建知识库": "Building knowledge base",
"需Latex": "Requires Latex",
"即找不到": "That is not found",
"保证括号正确": "Ensure parentheses are correct",
"= 2 通过一些Latex模板中常见": "= 2 through some common Latex templates",
"请立即终止程序": "Please terminate the program immediately",
"解压失败! 需要安装pip install rarfile来解压rar文件": "Decompression failed! Install 'pip install rarfile' to decompress rar files",
"请在此处给出自定义翻译命令": "Please provide custom translation command here",
"解压失败! 需要安装pip install py7zr来解压7z文件": "Decompression failed! Install 'pip install py7zr' to decompress 7z files",
"执行错误": "Execution error",
"目前仅支持GPT3.5/GPT4": "Currently only supports GPT3.5/GPT4",
"P.S. 顺便把Latex的注释去除": "P.S. Also remove comments from Latex",
"写出文件": "Write out the file",
"当前报错的latex代码处于第": "The current error in the LaTeX code is on line",
"主程序即将开始": "Main program is about to start",
"详情信息见requirements.txt": "See details in requirements.txt",
"释放线程锁": "Release thread lock",
"由于最为关键的转化PDF编译失败": "Due to the critical failure of PDF conversion and compilation",
"即将退出": "Exiting soon",
"尝试下载": "Attempting to download",
"删除整行的空注释": "Remove empty comments from the entire line",
"也找不到": "Not found either",
"从一批文件": "From a batch of files",
"编译结束": "Compilation finished",
"调用缓存": "Calling cache",
"只有GenerateImage和生成图像相关": "Only GenerateImage and image generation related",
"待处理的word文档路径": "Path of the word document to be processed",
"是否在提交时自动清空输入框": "Whether to automatically clear the input box upon submission",
"检查结果": "Check the result",
"生成时间戳": "Generate a timestamp",
"编译原始PDF": "Compile the original PDF",
"填入ENGINE": "Fill in ENGINE",
"填入api版本": "Fill in the API version",
"中文Bing版": "Chinese Bing version",
"当前支持的格式包括": "Currently supported formats include",
"交互功能模板函数": "InteractiveFunctionTemplateFunction",
"交互功能函数模板": "InteractiveFunctionFunctionTemplate",
"语音助手": "VoiceAssistant",
"微调数据集生成": "FineTuneDatasetGeneration",
"chatglm微调工具": "ChatGLMFineTuningTool",
"解析一个Rect项目": "ParseARectProject",
"用于灵活调整复杂功能的各种参数": "Various parameters for flexible adjustment of complex functions",
"阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https": "Aliyun real-time speech recognition has high configuration difficulty and is only recommended for advanced users. Refer to https",
"/* 设定聊天气泡的样式": "/* Set the style of chat bubbles",
"解析整个React项目": "Parsing the entire React project",
"后来也传播到了其他游戏中": "Later it spread to other games",
"多线程demo": "Multithreading demo",
"最初起源于游戏《CSGO》": "Originally originated from the game 'CSGO'",
"首先你在中文语境下通读整篇论文": "First, you read the entire paper in a Chinese context",
"避免线程阻塞": "Avoid thread blocking",
"其中第1份搜索结果中的一篇文章也指出": "One article in the first search result also points out",
"3. 更新速度": "3. Update speed",
"极少数情况下": "In very few cases",
"例如 RoPlZrM88DnAFkZK": "For example, RoPlZrM88DnAFkZK",
"每秒采样数量": "Number of samples per second",
"以及其中不少玩家以极端立场宣扬的想法和言论": "And many players promote extreme ideas and opinions",
"音频助手": "Audio assistant",
"还需要填写组织": "Also need to fill in the organization",
"计算平方和": "Calculate the sum of squares",
"请向下翻": "Please scroll down",
"起源于哪里": "Where does it originate from",
"并且网友们还 给这位UP主起了“苍蝇侠”的外号": "And netizens also gave this UP host the nickname 'Flyman'",
"第一次调用": "First call",
"点击“停止”键可终止程序": "Click the 'Stop' button to terminate the program",
"在执行完成之后": "After execution is complete",
"老六是网络流行语": "Lao Liu is an internet slang",
"请先将.doc文档转换为.docx文档": "Please convert the .doc document to .docx document first",
"从第0份、第1份、第2份搜索结果可以看出": "It can be seen from the 0th, 1st, and 2nd search results",
"如 绿帽子*深蓝色衬衫*黑色运动裤": "Such as green hat * dark blue shirt * black sports pants",
"对这个人外貌、身处的环境、内心世界、过去经历进行描写": "Describing the appearance, environment, inner world, and past experiences of this person",
"沙特和伊朗于3月10日达成了恢复两国外交关系的协议": "Saudi Arabia and Iran reached an agreement on March 10th to restore diplomatic relations between the two countries",
"例如 f37f30e0f9934c34a992f6f64f7eba4f": "For example, f37f30e0f9934c34a992f6f64f7eba4f",
"避免多用户干扰": "Avoiding interference from multiple users",
"AutoGPT是一个基于GPT-4语言模型的开源应用程序": "AutoGPT is an open-source application based on the GPT-4 language model",
"开始最终总结": "Start the final summary",
"包括事件分析、营销方案撰写、代码编程、数学运算等等": "Including event analysis, writing marketing plans, coding, mathematical calculations, etc.",
"包括背景颜色、内、外边距、圆角": "Including background color, padding, margin, and border-radius",
"只读": "Read-only",
"/* 设置表头单元格的内边距为0.5em和0.2em. */": "/* Set the padding of the table header cells to 0.5em and 0.2em. */",
"游戏中的一个情节": "A plot in the game",
"如果一句话小于7个字": "If a sentence is less than 7 words",
"赋予插件状态": "Assigning plugin status",
"用第二人称": "Using the second person",
"上传本地文件/压缩包供函数插件调用": "Upload local files/compressed packages for function plugin calls",
"要求": "Requirements",
"正在等您说完问题": "Waiting for you to finish the question",
"后来逐渐演变成指玩得比较阴险的玩家": "Later it gradually evolved to refer to players who play in a more cunning way",
"当下一次用户提交时": "When the next user submits",
"色彩主体": "Color theme",
"例如 “Espe-": "For example, 'Espe-'",
"为每一位访问的用户赋予一个独一无二的uuid编码": "Assign a unique UUID code to each visiting user",
"清空label": "Clear label",
"对话助手函数插件": "Conversation assistant function plugin",
"Chuanhu-Small-and-Beautiful主题": "Chuanhu-Small-and-Beautiful theme",
"初始化音频采集线程": "Initialize the audio capture thread",
"“我们称之为高效”这 一用语来源于群星": "The phrase 'we call it efficient' comes from the stars",
"给观众留下了等待的时间过长的印象": "Leaving the audience with the impression of waiting for too long",
"在一个异步线程中采集音频": "Collect audio in an asynchronous thread",
"正在听您讲话": "Listening to you speak",
"指游戏中玩家中独来独往、游离于队伍之外的“自由人”或玩得比较菜或者玩得比较阴险的人": "Refers to players in the game who are independent, play poorly, or play cunningly and stay away from the team",
"函数插件模板Demo": "Function plugin template Demo",
"比如巨像": "Such as giant",
"格式如org-123456789abcdefghijklmno的": "The format is like org-123456789abcdefghijklmno",
"插件锁定中": "Plugin locked",
"/* 去掉列表前缀的默认间距": "/* Remove the default spacing of the list prefix",
"内部单元格之间边框合并": "Merge borders between internal cells",
"可以将自身的状态存储到cookie中": "Can store its own state in a cookie",
"解除插件锁定": "Unlock plugin",
"/* 设定代码块的样式": "/* Set the style of the code block",
"右下角更换模型菜单中可切换openai": "You can switch openai in the lower right corner model menu",
"颜色为--border-color-primary. */": "The color is --border-color-primary. */",
"引用了一群游戏玩家对于需要对P社玩家进行枪毙的讨论": "Quoted a discussion among a group of gamers about the need to execute P community players",
"这个话题的本质是玩家们对于P 社游戏中的政治与历史元素的不同看法": "The essence of this topic is the different views of players on the political and historical elements in P community games",
"我将为您查找相关壁纸": "I will find related wallpapers for you",
"640个字节为一组": "640 bytes per group",
"赋予插件锁定 锁定插件回调路径": "Assign plugin lock, lock plugin callback path",
"等游戏": "Waiting for the game",
"将文件添加到chatbot cookie中": "Add file to chatbot cookie",
"处理个别特殊插件的锁定状态": "Handle the lock status of individual special plugins",
"/* 设置表头背景颜色为rgba": "/* Set the background color of the table header to rgba",
"读 docs\\use_azure.md": "Read docs\\use_azure.md",
"边框粗细为1.2px": "The border thickness is 1.2px",
"这个用语最初可能是在群星": "This term may have originated in the stars",
"请输入关键词": "Please enter keywords",
"给出实现的步骤和实现细节": "Provide implementation steps and details",
"可以得知失败的man是指一位在B站购买了蜘蛛侠COS服后穿上后被网友嘲笑的UP主": "The 'failed man' refers to a UP main who bought a Spiderman COS costume on Bilibili and was mocked by netizens after wearing it.",
"最近它在GitHub上爆火": "Recently, it has become popular on GitHub.",
"提取总结": "Extract summary",
"正在锁定插件": "Locking the plugin",
"给出指令": "Give instructions",
"避免遗忘导致死锁": "Avoid forgetting causing deadlock",
"建议直接在API_KEY处填写": "It is recommended to fill in directly at API_KEY",
"详情见https": "For details, see https",
"因此有人就以枪毙这些玩家来回应此类言论": "Therefore, some people respond to such comments by executing these players",
"而“失败的man”是蜘蛛侠英文名“spiderman”的谐音梗": "And 'failed man' is a pun on the English name 'Spiderman'",
"格式如org-xxxxxxxxxxxxxxxxxxxxxxxx": "The format is like org-xxxxxxxxxxxxxxxxxxxxxxxx",
"根据第1份搜索结果": "According to the first search result",
"GPT 学术优化": "GPT academic optimization",
"等待用户的再次调用": "Waiting for user's call again",
"把文件复制过去": "Copy the file over",
"该选项即将被弃用": "This option will be deprecated",
"对这个人外貌、身处的环境、内心世界、人设进行描写": "Describe this person's appearance, environment, inner world, and character setting",
"例如您可以将以下命令复制到下方": "For example, you can copy the following command below",
"使用": "Use",
"找不到": "Not found",
"正常状态": "Normal state",
"记住当前的label": "Remember the current label",
"成为了业内最热门的项目之一": "Has become one of the hottest projects in the industry",
"它可以根据用户需求自主执行任务": "It can independently perform tasks according to user needs",
"时而快时而慢": "Sometimes fast, sometimes slow",
"它们都是关于一个知乎用户所发的帖子": "They are all about a post made by a Zhihu user",
"解决插件锁定时的界面显示问题": "Resolve interface display issues when the plugin is locked",
"azure和api2d请求源": "Azure and api2d request sources",
"并不应该被当做真实的态度或者观点": "Should not be taken as a real attitude or viewpoint",
"没有阿里云语音识别APPKEY和TOKEN": "No Alibaba Cloud Speech Recognition APPKEY and TOKEN",
"Rainbow Six Siege 游戏中 Smoke 的 Canister 中装有何种物质相关的官方信息": "Official information related to the substance contained in Smoke's Canister in the game Rainbow Six Siege",
"此处填API密钥": "Fill in API key here",
"使其与文本线对齐. */": "Align it with the text line. */",
"先删除": "Delete first",
"100字以内": "Within 100 characters",
"并完全不需要用户插手": "And does not require user intervention at all",
"插件可读取“输入区”文本/路径作为参数": "The plugin can read the text/path in the 'input area' as a parameter",
"但是这个话题本身并没有实质内容": "But this topic itself has no substantive content",
"会直接转到该函数": "Will directly go to that function",
"因此这种说法没有实际意义": "Therefore, this statement has no practical meaning",
"它可以自己思考": "It can think for itself",
"/* 行内代码的背景设为淡灰色": "/* Set the background of inline code to light gray",
"请直接提交即可": "Please submit directly",
"计算欧几里得距离矩阵": "Calculate the Euclidean distance matrix",
"空单元格显示. */": "Display empty cells. */",
"openai的官方KEY需要伴随组织编码": "The official KEY of OpenAI needs to be accompanied by organizational coding",
"最近在中国的斡旋下": "Recently, under the mediation of China",
"将子线程的gpt结果写入chatbot": "Write the GPT results of the sub-thread into the chatbot",
"罗小黑战记的更新时间不定": "The update time of Luo Xiaohei's Adventure is uncertain",
"/* 设置表格单元格的内边距为5px": "/* Set the inner padding of table cells to 5px",
"透明度为0.2. */": "Opacity is 0.2. */",
"暂不提交": "Do not submit temporarily",
"用户们用来形容一些游戏策略或行为非常高效且能够带来好的效果的用语": "Terms used by users to describe game strategies or behaviors that are very efficient and can bring good results",
"等待GPT响应": "Waiting for GPT response",
"计算输入数组X中所有样本点的两两距离矩阵": "Calculate the pairwise distance matrix of all sample points in the input array X",
"因此": "Therefore",
"包括圆角、最大宽度和阴影等. */": "Including rounded corners, maximum width, and shadows, etc. */",
"初始化插件状态": "Initialize plugin status",
"请讲话": "Please speak",
"可选": "Optional",
"甚至可以自问自答执 行任务": "You can even ask and answer questions to perform tasks",
"add gpt task 创建子线程请求gpt": "add gpt task Create a sub-thread request gpt",
"失败的man是指这位UP主在穿上蜘蛛侠COS服后被网友嘲笑的情况": "The failed man refers to the situation where this UP master was mocked by netizens after wearing Spider-Man COS clothes",
"这个游戏里面流行起来的": "Popular in this game",
"获取关键词": "Get keywords",
"设定圆角和间距. */": "Set rounded corners and spacing. */",
"/* 设置表格的外边距为1em": "/* Set the margin of the table to 1em",
"只是一个玩笑或者恶搞": "Just a joke or a prank",
"双手离开鼠标键盘吧": "Take your hands off the mouse and keyboard",
"上传文件自动修正路径": "Automatically correct the path when uploading files",
"找不到任何Rect文件": "Cannot find any Rect files",
"先上传数据集": "Upload the dataset first",
"如果已经存在": "If it already exists",
"使用时": "When using",
"解除插件状态": "Release plugin status",
"cially” 转换为 “Especially”": "Convert 'cially' to 'Especially'",
"这表明两国关系已经重新回到正常化状态": "This indicates that the relationship between the two countries has returned to a normal state",
"由于提问含不合规内容被Azure过滤": "Due to the Azure filtering of questions containing non-compliant content",
"填入你亲手写的部署名": "Fill in the deployment name you wrote by hand",
"想象一个穿着者": "Imagine a wearer",
"建议使用英文单词": "It is recommended to use English words",
"没给定指令": "No instruction given",
"实时音频采集": "Real-time audio collection",
"“我们称之为高效”是指在游戏社区中": "'We call it efficient' refers to the game community",
"找 API_ORG 设置项": "Find API_ORG settings",
"虚空终端": "VoidTerminal",
"请查看terminal的输出或耐心等待": "Please check the output of the terminal or wait patiently",
"发送到openai音频解析terminal": "Send to openai audio parsing terminal",
"超级terminal": "Super terminal"
}

119
docs/use_azure.md Normal file
View File

@ -0,0 +1,119 @@
# Applying for the OpenAI API through Microsoft Azure cloud services
Thanks to the relationship between OpenAI and Microsoft, it is now possible to access the OpenAI API directly through Microsoft's Azure cloud computing service, which avoids the registration and network issues.
The official quickstart documentation is: [Quickstart - Get started using ChatGPT and GPT-4 with Azure OpenAI Service - Azure OpenAI Service | Microsoft Learn](https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/chatgpt-quickstart?pivots=programming-language-python)
# Applying for the API
According to the "Prerequisites" section of that document, besides a programming environment, the following three things are needed:
1.  An Azure account and a subscription
2.  Adding the Azure OpenAI service to the subscription
3.  Deploying a model
## Azure account and subscription
### Azure account
When creating an Azure account, it is best to already have a Microsoft account; this seems to make it easier to get the free credit (200 USD for the first month). In an actual test, logging in to Azure with a freshly registered Microsoft account did not come with this one-month free credit.
The page for creating an Azure account is [Create your Azure free account today | Microsoft Azure](https://azure.microsoft.com/zh-cn/free/)
![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_944786_iH6AECuZ_tY0EaBd_1685327219?w=1327\&h=695\&type=image/png)
After opening the page, click "Start free", which takes you to a login or registration page. If you already have a Microsoft account, simply log in; if not, you will need to register one separately on Microsoft's website.
Note that Azure's pages and policies change from time to time; whatever is actually displayed at the moment is what counts.
### Creating a subscription
After registering with Azure, you can enter the home page:
![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_444847_tk-9S-pxOYuaLs_K_1685327675?w=1865\&h=969\&type=image/png)
First, you need to add a subscription; click it to open the subscription page:
![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_612820_z_1AlaEgnJR-rUl0_1685327892?w=1865\&h=969\&type=image/png)
The first time you open it, it should be empty. Click Add to create a new subscription, which can be either a "Free" or a "Pay-As-You-Go" subscription. The subscription ID is what you will need later when applying for Azure OpenAI.
## Adding the Azure OpenAI service to the subscription
Then go back to the home page and click Azure OpenAI to enter the OpenAI service page (if it is not shown, search for "openai" in the search bar at the top of the home page):
![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_269759_nExkGcPC0EuAR5cp_1685328130?w=1865\&h=969\&type=image/png)
However, the service cannot be used yet. Before using it, you also need to apply at this address:
[Request Access to Azure OpenAI Service (microsoft.com)](https://customervoice.microsoft.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR7en2Ais5pxKtso_Pz4b1_xUOFA5Qk1UWDRBMjg0WFhPMkIzTzhKQ1dWNyQlQCN0PWcu)
There are about twenty questions; fill them in as required, according to your actual situation.
Points to note:
1.  Make absolutely sure to fill in the correct "subscription ID"
2.  You need to provide a company email address (it does not have to be the one used for registration) and a company website
After that, go back to the page above and click Create, and you will enter the creation page:
![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_72708_9d9JYhylPVz3dFWL_1685328372?w=824\&h=590\&type=image/png)
You need to fill in a "resource group" and a "name"; fill them in according to your own needs.
Once done, the newly created "resource" will show up under "Resources" on the home page; click into it to carry out the final deployment.
![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_871541_CGCnbgtV9Uk1Jccy_1685329861?w=1217\&h=628\&type=image/png)
## Deploying a model
After entering the resource page, before deploying a model, first click "Develop" and note down the key and the endpoint.
![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_852567_dxCZOrkMlWDSLH0d_1685330736?w=856\&h=568\&type=image/png)
After that, you can deploy a model: click "Deploy", which takes you to Azure OpenAI Studio for the following steps:
![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_169225_uWs1gMhpNbnwW4h2_1685329901?w=1865\&h=969\&type=image/png)
After entering Azure OpenAI Studio, click "Create new deployment" and the following dialog pops up:
![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_391255_iXUSZAzoud5qlxjJ_1685330224?w=656\&h=641\&type=image/png)
Here, select gpt-35-turbo (or whichever model you need) and fill in a "deployment name" as needed to complete the model deployment.
![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_724099_vBaHcUilsm1EtPgK_1685330396?w=1869\&h=482\&type=image/png)
This deployment name needs to be written down.
The application process is now complete. The things you need to have noted down are:
● Key (corresponds to AZURE_API_KEY; either key 1 or key 2 works)
● Endpoint (corresponds to AZURE_ENDPOINT)
● Deployment name (corresponds to AZURE_ENGINE; note this is not the model name)
# Modifying config.py
```
LLM_MODEL = "azure-gpt-3.5" # 指定启动时的默认模型当然事后从下拉菜单选也ok
AZURE_ENDPOINT = "填入终结点" # 见上述图片
AZURE_API_KEY = "填入azure openai api的密钥"
AZURE_API_VERSION = "2023-05-15" # 默认使用 2023-05-15 版本,无需修改
AZURE_ENGINE = "填入部署名" # 见上述图片
```
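For reference, the sketch below illustrates how these three values end up being used: judging from the `request_llm/bridge_all.py` change in this compare, the project concatenates `AZURE_ENDPOINT` and `AZURE_ENGINE` into the chat-completions URL, and `bridge_chatgpt.py` sends the key in an `api-key` header. All values below are hypothetical placeholders; this is a minimal standalone illustration, not project code.

```
# Minimal sketch (hypothetical values) of how the Azure settings above map to a request.
import requests

AZURE_ENDPOINT = "https://my-resource.openai.azure.com/"   # must end with "/"
AZURE_ENGINE = "my-gpt35-deployment"                       # deployment name, not the model name
AZURE_API_KEY = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"         # key 1 or key 2
AZURE_API_VERSION = "2023-05-15"

# Same URL construction as shown in the request_llm/bridge_all.py diff below.
url = AZURE_ENDPOINT + f"openai/deployments/{AZURE_ENGINE}/chat/completions?api-version={AZURE_API_VERSION}"
headers = {"Content-Type": "application/json", "api-key": AZURE_API_KEY}
payload = {"messages": [{"role": "user", "content": "Hello, Azure OpenAI!"}]}

resp = requests.post(url, headers=headers, json=payload, timeout=60)
print(resp.json()["choices"][0]["message"]["content"])
```

In the actual project these values are read from config.py via `get_conf`, as the bridge code further down in this compare shows.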
# About cost
The Azure OpenAI API does cost some money (the free subscription is only valid for one month).
For details, see [Azure OpenAI Service - Pricing | Microsoft Azure](https://azure.microsoft.com/zh-cn/pricing/details/cognitive-services/openai-service/?cdn=disable)
It is not the "free ride for a whole year" that some posts online claim, but the registration process and the network situation are both somewhat simpler than using the OpenAI API directly.

12021
ft_ds.json Normal file

File diff suppressed because it is too large

87
main.py
View File

@ -2,24 +2,25 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
def main():
import gradio as gr
if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "用 pip install -r requirements.txt 安装依赖"
if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖详情信息见requirements.txt"
from request_llm.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS')
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = \
get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
ENABLE_AUDIO, AUTO_CLEAR_TXT = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT')
# 如果WEB_PORT是-1, 则随机选取WEB端口
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
if not AUTHENTICATION: AUTHENTICATION = None
from check_proxy import get_current_version
from theme.theme import adjust_theme, advanced_css, theme_declaration
initial_prompt = "Serve me as a writing and programming assistant."
title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
# 问询记录, python 版本建议3.9+(越新越好)
import logging
import logging, uuid
os.makedirs("gpt_log", exist_ok=True)
try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
@ -37,7 +38,6 @@ def main():
gr.Chatbot.postprocess = format_io
# 做一些外观色彩上的调整
from theme import adjust_theme, advanced_css
set_theme = adjust_theme()
# 代理与自动更新
@ -45,23 +45,23 @@ def main():
proxy_info = check_proxy(proxies)
gr_L1 = lambda: gr.Row().style()
gr_L2 = lambda scale: gr.Column(scale=scale)
gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id)
if LAYOUT == "TOP-DOWN":
gr_L1 = lambda: DummyWith()
gr_L2 = lambda scale: gr.Row()
gr_L2 = lambda scale, elem_id: gr.Row()
CHATBOT_HEIGHT /= 2
cancel_handles = []
with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
gr.HTML(title_html)
cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
cookies = gr.State(load_chat_cookies())
with gr_L1():
with gr_L2(scale=2):
chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}")
chatbot.style(height=CHATBOT_HEIGHT)
with gr_L2(scale=2, elem_id="gpt-chat"):
chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}", elem_id="gpt-chatbot")
if LAYOUT == "TOP-DOWN": chatbot.style(height=CHATBOT_HEIGHT)
history = gr.State([])
with gr_L2(scale=1):
with gr.Accordion("输入区", open=True) as area_input_primary:
with gr_L2(scale=1, elem_id="gpt-panel"):
with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
with gr.Row():
txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
with gr.Row():
@ -70,17 +70,20 @@ def main():
resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm")
if ENABLE_AUDIO:
with gr.Row():
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
with gr.Accordion("基础功能区", open=True) as area_basic_fn:
audio_mic = gr.Audio(source="microphone", type="numpy", streaming=True, show_label=False)
with gr.Row():
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
with gr.Row():
for k in functional:
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
functional[k]["Button"] = gr.Button(k, variant=variant)
with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
with gr.Row():
gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
with gr.Row():
for k in crazy_fns:
if not crazy_fns[k].get("AsButton", True): continue
@ -91,25 +94,25 @@ def main():
with gr.Accordion("更多函数插件", open=True):
dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
with gr.Row():
dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False)
with gr.Row():
plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
with gr.Row():
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
with gr.Row():
with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up:
file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN")):
with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN"), elem_id="interact-panel"):
system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="Local LLM MaxLength",)
max_length_sl = gr.Slider(minimum=256, maximum=8192, value=4096, step=1, interactive=True, label="Local LLM MaxLength",)
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
gr.Markdown(description)
with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
with gr.Accordion("备选输入区", open=True, visible=False, elem_id="input-panel2") as area_input_secondary:
with gr.Row():
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
with gr.Row():
@ -144,6 +147,11 @@ def main():
resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
clearBtn.click(lambda: ("",""), None, [txt, txt2])
clearBtn2.click(lambda: ("",""), None, [txt, txt2])
if AUTO_CLEAR_TXT:
submitBtn.click(lambda: ("",""), None, [txt, txt2])
submitBtn2.click(lambda: ("",""), None, [txt, txt2])
txt.submit(lambda: ("",""), None, [txt, txt2])
txt2.submit(lambda: ("",""), None, [txt, txt2])
# 基础功能区的回调函数注册
for k in functional:
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
@ -155,7 +163,7 @@ def main():
for k in crazy_fns:
if not crazy_fns[k].get("AsButton", True): continue
click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
cancel_handles.append(click_handle)
# 函数插件-下拉菜单与随变按钮的互动
def on_dropdown_changed(k):
@ -171,15 +179,28 @@ def main():
return {chatbot: gr.update(label="当前模型:"+k)}
md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] )
# 随变按钮的回调函数注册
def route(k, *args, **kwargs):
def route(request: gr.Request, k, *args, **kwargs):
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(request, *args, **kwargs)
click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
cancel_handles.append(click_handle)
# 终止按钮的回调函数注册
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
if ENABLE_AUDIO:
from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution
rad = RealtimeAudioDistribution()
def deal_audio(audio, cookies):
rad.feed(cookies['uuid'].hex, audio)
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
def init_cookie(cookies, chatbot):
# 为每一位访问的用户赋予一个独一无二的uuid编码
cookies.update({'uuid': uuid.uuid4()})
return cookies
demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{ChatBotHeight();}')
# gradio的inbrowser触发不太稳定回滚代码到原始的浏览器打开函数
def auto_opentab_delay():
@ -197,7 +218,10 @@ def main():
threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
auto_opentab_delay()
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
server_name="0.0.0.0", server_port=PORT,
favicon_path="docs/logo.png", auth=AUTHENTICATION,
blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
# 如果需要在二级路径下运行
# CUSTOM_PATH, = get_conf('CUSTOM_PATH')
@ -205,7 +229,8 @@ def main():
# from toolbox import run_gradio_in_subpath
# run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
# else:
# demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
# demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
# blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
if __name__ == "__main__":
main()

View File

@ -33,7 +33,7 @@ import pickle
import time
CACHE_FOLDER = "gpt_log"
blacklist = ['multi-language', 'gpt_log', '.git', 'private_upload', 'multi_language.py']
blacklist = ['multi-language', 'gpt_log', '.git', 'private_upload', 'multi_language.py', 'build', '.github', '.vscode', '__pycache__', 'venv']
# LANG = "TraditionalChinese"
# TransPrompt = f"Replace each json value `#` with translated results in Traditional Chinese, e.g., \"原始文本\":\"翻譯後文字\". Keep Json format. Do not answer #."
@ -301,6 +301,7 @@ def step_1_core_key_translate():
elif isinstance(node, ast.ImportFrom):
for n in node.names:
if contains_chinese(n.name): syntax.append(n.name)
# if node.module is None: print(node.module)
for k in node.module.split('.'):
if contains_chinese(k): syntax.append(k)
return syntax
@ -310,6 +311,7 @@ def step_1_core_key_translate():
for root, dirs, files in os.walk(directory_path):
if any([b in root for b in blacklist]):
continue
print(files)
for file in files:
if file.endswith('.py'):
file_path = os.path.join(root, file)
@ -505,6 +507,6 @@ def step_2_core_key_translate():
with open(file_path_new, 'w', encoding='utf-8') as f:
f.write(content)
os.remove(file_path)
step_1_core_key_translate()
step_2_core_key_translate()
print('Finished, checkout generated results at ./multi-language/')

BIN
objdump.tmp Normal file

Binary file not shown.

View File

@ -19,9 +19,6 @@ from .bridge_chatgpt import predict as chatgpt_ui
from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
from .bridge_chatglm import predict as chatglm_ui
from .bridge_newbing import predict_no_ui_long_connection as newbing_noui
from .bridge_newbing import predict as newbing_ui
# from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
# from .bridge_tgui import predict as tgui_ui
@ -48,10 +45,11 @@ class LazyloadTiktoken(object):
return encoder.decode(*args, **kwargs)
# Endpoint 重定向
API_URL_REDIRECT, = get_conf("API_URL_REDIRECT")
API_URL_REDIRECT, AZURE_ENDPOINT, AZURE_ENGINE = get_conf("API_URL_REDIRECT", "AZURE_ENDPOINT", "AZURE_ENGINE")
openai_endpoint = "https://api.openai.com/v1/chat/completions"
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
# 兼容旧版的配置
try:
API_URL, = get_conf("API_URL")
@ -84,6 +82,33 @@ model_info = {
"token_cnt": get_token_num_gpt35,
},
"gpt-3.5-turbo-16k": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 1024*16,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gpt-3.5-turbo-0613": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gpt-3.5-turbo-16k-0613": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 1024 * 16,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gpt-4": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@ -93,6 +118,16 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
# azure openai
"azure-gpt-3.5":{
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": azure_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
# api_2d
"api2d-gpt-3.5-turbo": {
"fn_with_ui": chatgpt_ui,
@ -112,7 +147,7 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
# chatglm
# chatglm 直接对齐到 chatglm2
"chatglm": {
"fn_with_ui": chatglm_ui,
"fn_without_ui": chatglm_noui,
@ -121,12 +156,11 @@ model_info = {
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
# newbing
"newbing": {
"fn_with_ui": newbing_ui,
"fn_without_ui": newbing_noui,
"endpoint": newbing_endpoint,
"max_token": 4096,
"chatglm2": {
"fn_with_ui": chatglm_ui,
"fn_without_ui": chatglm_noui,
"endpoint": None,
"max_token": 1024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
@ -218,6 +252,41 @@ if "newbing-free" in AVAIL_LLM_MODELS:
})
except:
print(trimmed_format_exc())
if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free
try:
from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
from .bridge_newbingfree import predict as newbingfree_ui
# claude
model_info.update({
"newbing": {
"fn_with_ui": newbingfree_ui,
"fn_without_ui": newbingfree_noui,
"endpoint": newbing_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free
try:
from .bridge_chatglmft import predict_no_ui_long_connection as chatglmft_noui
from .bridge_chatglmft import predict as chatglmft_ui
# claude
model_info.update({
"chatglmft": {
"fn_with_ui": chatglmft_ui,
"fn_without_ui": chatglmft_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
print(trimmed_format_exc())
def LLM_CATCH_EXCEPTION(f):
"""
@ -321,6 +390,6 @@ def predict(inputs, llm_kwargs, *args, **kwargs):
additional_fn代表点击的哪个按钮按钮见functional.py
"""
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错检查config中的AVAIL_LLM_MODELS选项
yield from method(inputs, llm_kwargs, *args, **kwargs)

View File

@ -40,12 +40,12 @@ class GetGLMHandle(Process):
while True:
try:
if self.chatglm_model is None:
self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
device, = get_conf('LOCAL_MODEL_DEVICE')
if device=='cpu':
self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).float()
else:
self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
self.chatglm_model = self.chatglm_model.eval()
break
else:

View File

@ -0,0 +1,210 @@
from transformers import AutoModel, AutoTokenizer
import time
import os
import json
import threading
import importlib
from toolbox import update_ui, get_conf
from multiprocessing import Process, Pipe
load_message = "ChatGLMFT尚未加载加载需要一段时间。注意取决于`config.py`的配置ChatGLMFT消耗大量的内存CPU或显存GPU也许会导致低配计算机卡死 ……"
def string_to_options(arguments):
import argparse
import shlex
# Create an argparse.ArgumentParser instance
parser = argparse.ArgumentParser()
# Add command-line arguments
parser.add_argument("--llm_to_learn", type=str, help="LLM model to learn", default="gpt-3.5-turbo")
parser.add_argument("--prompt_prefix", type=str, help="Prompt prefix", default='')
parser.add_argument("--system_prompt", type=str, help="System prompt", default='')
parser.add_argument("--batch", type=int, help="System prompt", default=50)
# Parse the arguments
args = parser.parse_args(shlex.split(arguments))
return args
#################################################################################
class GetGLMFTHandle(Process):
def __init__(self):
super().__init__(daemon=True)
self.parent, self.child = Pipe()
self.chatglmft_model = None
self.chatglmft_tokenizer = None
self.info = ""
self.success = True
self.check_dependency()
self.start()
self.threadLock = threading.Lock()
def check_dependency(self):
try:
import sentencepiece
self.info = "依赖检测通过"
self.success = True
except:
self.info = "缺少ChatGLMFT的依赖如果要使用ChatGLMFT除了基础的pip依赖以外您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。"
self.success = False
def ready(self):
return self.chatglmft_model is not None
def run(self):
# 子进程执行
# 第一次运行,加载参数
retry = 0
while True:
try:
if self.chatglmft_model is None:
from transformers import AutoConfig
import torch
# conf = 'request_llm/current_ptune_model.json'
# if not os.path.exists(conf): raise RuntimeError('找不到微调模型信息')
# with open(conf, 'r', encoding='utf8') as f:
# model_args = json.loads(f.read())
ChatGLM_PTUNING_CHECKPOINT, = get_conf('ChatGLM_PTUNING_CHECKPOINT')
conf = os.path.join(ChatGLM_PTUNING_CHECKPOINT, "config.json")
with open(conf, 'r', encoding='utf8') as f:
model_args_ = json.loads(f.read())
model_args_.update(model_args)
model_args = model_args_
self.chatglmft_tokenizer = AutoTokenizer.from_pretrained(
model_args['model_name_or_path'], trust_remote_code=True)
config = AutoConfig.from_pretrained(
model_args['model_name_or_path'], trust_remote_code=True)
config.pre_seq_len = model_args['pre_seq_len']
config.prefix_projection = model_args['prefix_projection']
print(f"Loading prefix_encoder weight from {ChatGLM_PTUNING_CHECKPOINT}")
model = AutoModel.from_pretrained(model_args['model_name_or_path'], config=config, trust_remote_code=True)
prefix_state_dict = torch.load(os.path.join(ChatGLM_PTUNING_CHECKPOINT, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
if k.startswith("transformer.prefix_encoder."):
new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
if model_args['quantization_bit'] is not None:
print(f"Quantized to {model_args['quantization_bit']} bit")
model = model.quantize(model_args['quantization_bit'])
model = model.cuda()
if model_args['pre_seq_len'] is not None:
# P-tuning v2
model.transformer.prefix_encoder.float()
self.chatglmft_model = model.eval()
break
else:
break
except Exception as e:
retry += 1
if retry > 3:
self.child.send('[Local Message] Call ChatGLMFT fail 不能正常加载ChatGLMFT的参数。')
raise RuntimeError("不能正常加载ChatGLMFT的参数")
while True:
# 进入任务等待状态
kwargs = self.child.recv()
# 收到消息,开始请求
try:
for response, history in self.chatglmft_model.stream_chat(self.chatglmft_tokenizer, **kwargs):
self.child.send(response)
# # 中途接收可能的终止指令(如果有的话)
# if self.child.poll():
# command = self.child.recv()
# if command == '[Terminate]': break
except:
from toolbox import trimmed_format_exc
self.child.send('[Local Message] Call ChatGLMFT fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n')
# 请求处理结束,开始下一个循环
self.child.send('[Finish]')
def stream_chat(self, **kwargs):
# 主进程执行
self.threadLock.acquire()
self.parent.send(kwargs)
while True:
res = self.parent.recv()
if res != '[Finish]':
yield res
else:
break
self.threadLock.release()
global glmft_handle
glmft_handle = None
#################################################################################
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
多线程方法
函数的说明请见 request_llm/bridge_all.py
"""
global glmft_handle
if glmft_handle is None:
glmft_handle = GetGLMFTHandle()
if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glmft_handle.info
if not glmft_handle.success:
error = glmft_handle.info
glmft_handle = None
raise RuntimeError(error)
# chatglmft 没有 sys_prompt 接口因此把prompt加入 history
history_feedin = []
history_feedin.append(["What can I do?", sys_prompt])
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]] )
watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
response = ""
for response in glmft_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
if len(observe_window) >= 1: observe_window[0] = response
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("程序终止。")
return response
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
单线程方法
函数的说明请见 request_llm/bridge_all.py
"""
chatbot.append((inputs, ""))
global glmft_handle
if glmft_handle is None:
glmft_handle = GetGLMFTHandle()
chatbot[-1] = (inputs, load_message + "\n\n" + glmft_handle.info)
yield from update_ui(chatbot=chatbot, history=[])
if not glmft_handle.success:
glmft_handle = None
return
if additional_fn is not None:
import core_functional
importlib.reload(core_functional) # 热更新prompt
core_functional = core_functional.get_core_functions()
if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
# 处理历史信息
history_feedin = []
history_feedin.append(["What can I do?", system_prompt] )
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]] )
# 开始接收chatglmft的回复
response = "[Local Message]: 等待ChatGLMFT响应中 ..."
for response in glmft_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)
# 总结输出
if response == "[Local Message]: 等待ChatGLMFT响应中 ...":
response = "[Local Message]: ChatGLMFT响应异常 ..."
history.extend([inputs, response])
yield from update_ui(chatbot=chatbot, history=history)

View File

@ -22,8 +22,8 @@ import importlib
# config_private.py放自己的秘密如API和代理网址
# 读取时首先看是否存在私密的config_private配置文件不受git管控如果有则覆盖原config文件
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc
proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY')
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG = \
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG')
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
@ -101,6 +101,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("用户取消了程序。")
else: raise RuntimeError("意外Json结构"+delta)
if json_data['finish_reason'] == 'content_filter':
raise RuntimeError("由于提问含不合规内容被Azure过滤。")
if json_data['finish_reason'] == 'length':
raise ConnectionAbortedError("正常结束但显示Token不足导致输出不完整请削减单次输入的文本量。")
return result
@ -205,6 +207,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
chunk = get_full_error(chunk, stream_response)
chunk_decoded = chunk.decode()
error_msg = chunk_decoded
openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup'
if "reduce the length" in error_msg:
if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入history[-2] 是本次输入, history[-1] 是本次输出
history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
@ -214,9 +217,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
elif "does not exist" in error_msg:
chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
elif "Incorrect API key" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务.")
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website)
elif "exceeded your current quota" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务.")
chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website)
elif "account is not active" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website)
elif "associated with a deactivated account" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website)
elif "bad forward key" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
elif "Not enough point" in error_msg:
@ -241,6 +248,8 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"
}
if API_ORG.startswith('org-'): headers.update({"OpenAI-Organization": API_ORG})
if llm_kwargs['llm_model'].startswith('azure-'): headers.update({"api-key": api_key})
conversation_cnt = len(history) // 2
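
Together with the existing Authorization entry, the two conditional updates above produce headers like the following (a sketch; api_key, API_ORG and the model name are stand-ins read from config elsewhere):

def build_headers(api_key, API_ORG, llm_model):
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    if API_ORG.startswith('org-'):
        headers["OpenAI-Organization"] = API_ORG  # official keys bound to an organization
    if llm_model.startswith('azure-'):
        headers["api-key"] = api_key              # Azure OpenAI reads the key from api-key
    return headers

# build_headers("sk-...", "org-...", "azure-gpt-3.5") contains both extra entries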


@ -1,254 +0,0 @@
"""
========================================================================
第一部分来自EdgeGPT.py
https://github.com/acheong08/EdgeGPT
========================================================================
"""
from .edge_gpt import NewbingChatbot
load_message = "等待NewBing响应。"
"""
========================================================================
第二部分子进程Worker调用主体
========================================================================
"""
import time
import json
import re
import logging
import asyncio
import importlib
import threading
from toolbox import update_ui, get_conf, trimmed_format_exc
from multiprocessing import Process, Pipe
def preprocess_newbing_out(s):
pattern = r'\^(\d+)\^' # 匹配^数字^
sub = lambda m: '('+m.group(1)+')' # 将匹配到的数字作为替换值
result = re.sub(pattern, sub, s) # 替换操作
if '[1]' in result:
result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
return result
def preprocess_newbing_out_simple(result):
if '[1]' in result:
result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
return result
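
A quick illustration of the ^n^ rewrite that preprocess_newbing_out performs (the sample string is made up):

import re
sample = "NewBing cites sources^1^ inline^2^."
print(re.sub(r'\^(\d+)\^', lambda m: '(' + m.group(1) + ')', sample))
# -> NewBing cites sources(1) inline(2).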
class NewBingHandle(Process):
def __init__(self):
super().__init__(daemon=True)
self.parent, self.child = Pipe()
self.newbing_model = None
self.info = ""
self.success = True
self.local_history = []
self.check_dependency()
self.start()
self.threadLock = threading.Lock()
def check_dependency(self):
try:
self.success = False
import certifi, httpx, rich
self.info = "依赖检测通过等待NewBing响应。注意目前不能多人同时调用NewBing接口有线程锁否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时会自动使用已配置的代理。"
self.success = True
except:
self.info = "缺少的依赖如果要使用Newbing除了基础的pip依赖以外您还需要运行`pip install -r request_llm/requirements_newbing.txt`安装Newbing的依赖。"
self.success = False
def ready(self):
return self.newbing_model is not None
async def async_run(self):
# 读取配置
NEWBING_STYLE, = get_conf('NEWBING_STYLE')
from request_llm.bridge_all import model_info
endpoint = model_info['newbing']['endpoint']
while True:
# 等待
kwargs = self.child.recv()
question=kwargs['query']
history=kwargs['history']
system_prompt=kwargs['system_prompt']
# 是否重置
if len(self.local_history) > 0 and len(history)==0:
await self.newbing_model.reset()
self.local_history = []
# 开始问问题
prompt = ""
if system_prompt not in self.local_history:
self.local_history.append(system_prompt)
prompt += system_prompt + '\n'
# 追加历史
for ab in history:
a, b = ab
if a not in self.local_history:
self.local_history.append(a)
prompt += a + '\n'
# if b not in self.local_history:
# self.local_history.append(b)
# prompt += b + '\n'
# 问题
prompt += question
self.local_history.append(question)
print('question:', prompt)
# 提交
async for final, response in self.newbing_model.ask_stream(
prompt=question,
conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
):
if not final:
print(response)
self.child.send(str(response))
else:
print('-------- receive final ---------')
self.child.send('[Finish]')
# self.local_history.append(response)
def run(self):
"""
这个函数运行在子进程
"""
# 第一次运行,加载参数
self.success = False
self.local_history = []
if (self.newbing_model is None) or (not self.success):
# 代理设置
proxies, = get_conf('proxies')
if proxies is None:
self.proxies_https = None
else:
self.proxies_https = proxies['https']
# cookie
NEWBING_COOKIES, = get_conf('NEWBING_COOKIES')
try:
cookies = json.loads(NEWBING_COOKIES)
except:
self.success = False
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] 不能加载Newbing组件。NEWBING_COOKIES未填写或有格式错误。')
self.child.send('[Fail]')
self.child.send('[Finish]')
raise RuntimeError(f"不能加载Newbing组件。NEWBING_COOKIES未填写或有格式错误。")
try:
self.newbing_model = NewbingChatbot(proxy=self.proxies_https, cookies=cookies)
except:
self.success = False
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] 不能加载Newbing组件。{tb_str}')
self.child.send('[Fail]')
self.child.send('[Finish]')
raise RuntimeError(f"不能加载Newbing组件。")
self.success = True
try:
# 进入任务等待状态
asyncio.run(self.async_run())
except Exception:
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] Newbing失败 {tb_str}.')
self.child.send('[Fail]')
self.child.send('[Finish]')
def stream_chat(self, **kwargs):
"""
这个函数运行在主进程
"""
self.threadLock.acquire()
self.parent.send(kwargs) # 发送请求到子进程
while True:
res = self.parent.recv() # 等待newbing回复的片段
if res == '[Finish]':
break # 结束
elif res == '[Fail]':
self.success = False
break
else:
yield res # newbing回复的片段
self.threadLock.release()
"""
========================================================================
第三部分:主进程统一调用函数接口
========================================================================
"""
global newbing_handle
newbing_handle = None
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
"""
多线程方法
函数的说明请见 request_llm/bridge_all.py
"""
global newbing_handle
if (newbing_handle is None) or (not newbing_handle.success):
newbing_handle = NewBingHandle()
observe_window[0] = load_message + "\n\n" + newbing_handle.info
if not newbing_handle.success:
error = newbing_handle.info
newbing_handle = None
raise RuntimeError(error)
# 没有 sys_prompt 接口因此把prompt加入 history
history_feedin = []
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]] )
watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
response = ""
observe_window[0] = "[Local Message]: 等待NewBing响应中 ..."
for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
observe_window[0] = preprocess_newbing_out_simple(response)
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("程序终止。")
return preprocess_newbing_out_simple(response)
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
单线程方法
函数的说明请见 request_llm/bridge_all.py
"""
chatbot.append((inputs, "[Local Message]: 等待NewBing响应中 ..."))
global newbing_handle
if (newbing_handle is None) or (not newbing_handle.success):
newbing_handle = NewBingHandle()
chatbot[-1] = (inputs, load_message + "\n\n" + newbing_handle.info)
yield from update_ui(chatbot=chatbot, history=[])
if not newbing_handle.success:
newbing_handle = None
return
if additional_fn is not None:
import core_functional
importlib.reload(core_functional) # 热更新prompt
core_functional = core_functional.get_core_functions()
if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
history_feedin = []
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]] )
chatbot[-1] = (inputs, "[Local Message]: 等待NewBing响应中 ...")
response = "[Local Message]: 等待NewBing响应中 ..."
yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢尚未完成全部响应请耐心完成后再提交新问题。")
for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
chatbot[-1] = (inputs, preprocess_newbing_out(response))
yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢尚未完成全部响应请耐心完成后再提交新问题。")
if response == "[Local Message]: 等待NewBing响应中 ...": response = "[Local Message]: NewBing响应异常请刷新界面重试 ..."
history.extend([inputs, response])
logging.info(f'[raw_input] {inputs}')
logging.info(f'[response] {response}')
yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
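
The bridges in this changeset (this deleted one and bridge_newbingfree below) share the same Process/Pipe protocol: the child streams text fragments and ends with a sentinel, '[Finish]' on success or '[Fail]' on error. A self-contained sketch of that exchange:

from multiprocessing import Pipe

parent, child = Pipe()

# child side: stream fragments, then the success sentinel
child.send("partial ")
child.send("answer")
child.send('[Finish]')

# parent side: collect fragments until a sentinel arrives
chunks = []
while True:
    res = parent.recv()
    if res in ('[Finish]', '[Fail]'):
        break
    chunks.append(res)
print("".join(chunks))  # -> partial answer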


@ -89,9 +89,6 @@ class NewBingHandle(Process):
if a not in self.local_history:
self.local_history.append(a)
prompt += a + '\n'
# if b not in self.local_history:
# self.local_history.append(b)
# prompt += b + '\n'
# 问题
prompt += question
@ -121,14 +118,26 @@ class NewBingHandle(Process):
self.local_history = []
if (self.newbing_model is None) or (not self.success):
# 代理设置
proxies, = get_conf('proxies')
proxies, NEWBING_COOKIES = get_conf('proxies', 'NEWBING_COOKIES')
if proxies is None:
self.proxies_https = None
else:
self.proxies_https = proxies['https']
if (NEWBING_COOKIES is not None) and len(NEWBING_COOKIES) > 100:
try:
self.newbing_model = NewbingChatbot(proxy=self.proxies_https)
cookies = json.loads(NEWBING_COOKIES)
except:
self.success = False
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] NEWBING_COOKIES未填写或有格式错误。')
self.child.send('[Fail]'); self.child.send('[Finish]')
raise RuntimeError(f"NEWBING_COOKIES未填写或有格式错误。")
else:
cookies = None
try:
self.newbing_model = NewbingChatbot(proxy=self.proxies_https, cookies=cookies)
except:
self.success = False
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
@ -143,7 +152,7 @@ class NewBingHandle(Process):
asyncio.run(self.async_run())
except Exception:
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] Newbing失败 {tb_str}.')
self.child.send(f'[Local Message] Newbing 请求失败,报错信息如下. 如果是与网络相关的问题建议更换代理协议推荐http或代理节点 {tb_str}.')
self.child.send('[Fail]')
self.child.send('[Finish]')
@ -151,18 +160,14 @@ class NewBingHandle(Process):
"""
这个函数运行在主进程
"""
self.threadLock.acquire()
self.parent.send(kwargs) # 发送请求子进程
self.threadLock.acquire() # 获取线程锁
self.parent.send(kwargs) # 请求子进程
while True:
res = self.parent.recv() # 等待newbing回复的片段
if res == '[Finish]':
break # 结束
elif res == '[Fail]':
self.success = False
break
else:
yield res # newbing回复的片段
self.threadLock.release()
if res == '[Finish]': break # 结束
elif res == '[Fail]': self.success = False; break # 失败
else: yield res # newbing回复的片段
self.threadLock.release() # 释放线程锁
"""


@ -1,4 +1,4 @@
from .bridge_newbing import preprocess_newbing_out, preprocess_newbing_out_simple
from .bridge_newbingfree import preprocess_newbing_out, preprocess_newbing_out_simple
from multiprocessing import Process, Pipe
from toolbox import update_ui, get_conf, trimmed_format_exc
import threading


@ -0,0 +1,5 @@
{
"model_name_or_path": "THUDM/chatglm2-6b",
"pre_seq_len": "128",
"ptuning_checkpoint": "/home/hmp/ChatGLM2-6B/ptuning/output/clothgen-chatglm2-6b-pt-128-2e-2/checkpoint-100"
}
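
This p-tuning descriptor is plain JSON; a hedged sketch of reading it (the file path here is hypothetical, the real one is whatever the ChatGLMFT bridge loads):

import json

with open("chatglmft_ptuning_config.json", "r", encoding="utf-8") as f:  # hypothetical path
    cfg = json.load(f)
model_path = cfg["model_name_or_path"]   # "THUDM/chatglm2-6b"
pre_seq_len = int(cfg["pre_seq_len"])    # stored as a string, so cast before use
checkpoint = cfg["ptuning_checkpoint"]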


@ -1,409 +0,0 @@
"""
========================================================================
第一部分来自EdgeGPT.py
https://github.com/acheong08/EdgeGPT
========================================================================
"""
import argparse
import asyncio
import json
import os
import random
import re
import ssl
import sys
import uuid
from enum import Enum
from typing import Generator
from typing import Literal
from typing import Optional
from typing import Union
import websockets.client as websockets
DELIMITER = "\x1e"
# Generate random IP between range 13.104.0.0/14
FORWARDED_IP = (
f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
)
HEADERS = {
"accept": "application/json",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
"sec-ch-ua-arch": '"x86"',
"sec-ch-ua-bitness": '"64"',
"sec-ch-ua-full-version": '"109.0.1518.78"',
"sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-model": "",
"sec-ch-ua-platform": '"Windows"',
"sec-ch-ua-platform-version": '"15.0.0"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"x-ms-client-request-id": str(uuid.uuid4()),
"x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32",
"Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx",
"Referrer-Policy": "origin-when-cross-origin",
"x-forwarded-for": FORWARDED_IP,
}
HEADERS_INIT_CONVER = {
"authority": "edgeservices.bing.com",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"accept-language": "en-US,en;q=0.9",
"cache-control": "max-age=0",
"sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
"sec-ch-ua-arch": '"x86"',
"sec-ch-ua-bitness": '"64"',
"sec-ch-ua-full-version": '"110.0.1587.69"',
"sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-model": '""',
"sec-ch-ua-platform": '"Windows"',
"sec-ch-ua-platform-version": '"15.0.0"',
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "none",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69",
"x-edge-shopping-flag": "1",
"x-forwarded-for": FORWARDED_IP,
}
def get_ssl_context():
import certifi
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
return ssl_context
class NotAllowedToAccess(Exception):
pass
class ConversationStyle(Enum):
creative = "h3imaginative,clgalileo,gencontentv3"
balanced = "galileo"
precise = "h3precise,clgalileo"
CONVERSATION_STYLE_TYPE = Optional[
Union[ConversationStyle, Literal["creative", "balanced", "precise"]]
]
def _append_identifier(msg: dict) -> str:
"""
Appends special character to end of message to identify end of message
"""
# Convert dict to json string
return json.dumps(msg) + DELIMITER
def _get_ran_hex(length: int = 32) -> str:
"""
Returns random hex string
"""
return "".join(random.choice("0123456789abcdef") for _ in range(length))
class _ChatHubRequest:
"""
Request object for ChatHub
"""
def __init__(
self,
conversation_signature: str,
client_id: str,
conversation_id: str,
invocation_id: int = 0,
) -> None:
self.struct: dict = {}
self.client_id: str = client_id
self.conversation_id: str = conversation_id
self.conversation_signature: str = conversation_signature
self.invocation_id: int = invocation_id
def update(
self,
prompt,
conversation_style,
options,
) -> None:
"""
Updates request object
"""
if options is None:
options = [
"deepleo",
"enable_debug_commands",
"disable_emoji_spoken_text",
"enablemm",
]
if conversation_style:
if not isinstance(conversation_style, ConversationStyle):
conversation_style = getattr(ConversationStyle, conversation_style)
options = [
"nlu_direct_response_filter",
"deepleo",
"disable_emoji_spoken_text",
"responsible_ai_policy_235",
"enablemm",
conversation_style.value,
"dtappid",
"cricinfo",
"cricinfov2",
"dv3sugg",
]
self.struct = {
"arguments": [
{
"source": "cib",
"optionsSets": options,
"sliceIds": [
"222dtappid",
"225cricinfo",
"224locals0",
],
"traceId": _get_ran_hex(32),
"isStartOfSession": self.invocation_id == 0,
"message": {
"author": "user",
"inputMethod": "Keyboard",
"text": prompt,
"messageType": "Chat",
},
"conversationSignature": self.conversation_signature,
"participant": {
"id": self.client_id,
},
"conversationId": self.conversation_id,
},
],
"invocationId": str(self.invocation_id),
"target": "chat",
"type": 4,
}
self.invocation_id += 1
class _Conversation:
"""
Conversation API
"""
def __init__(
self,
cookies,
proxy,
) -> None:
self.struct: dict = {
"conversationId": None,
"clientId": None,
"conversationSignature": None,
"result": {"value": "Success", "message": None},
}
import httpx
self.proxy = proxy
proxy = (
proxy
or os.environ.get("all_proxy")
or os.environ.get("ALL_PROXY")
or os.environ.get("https_proxy")
or os.environ.get("HTTPS_PROXY")
or None
)
if proxy is not None and proxy.startswith("socks5h://"):
proxy = "socks5://" + proxy[len("socks5h://") :]
self.session = httpx.Client(
proxies=proxy,
timeout=30,
headers=HEADERS_INIT_CONVER,
)
for cookie in cookies:
self.session.cookies.set(cookie["name"], cookie["value"])
# Send GET request
response = self.session.get(
url=os.environ.get("BING_PROXY_URL")
or "https://edgeservices.bing.com/edgesvc/turing/conversation/create",
)
if response.status_code != 200:
response = self.session.get(
"https://edge.churchless.tech/edgesvc/turing/conversation/create",
)
if response.status_code != 200:
print(f"Status code: {response.status_code}")
print(response.text)
print(response.url)
raise Exception("Authentication failed")
try:
self.struct = response.json()
except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc:
raise Exception(
"Authentication failed. You have not been accepted into the beta.",
) from exc
if self.struct["result"]["value"] == "UnauthorizedRequest":
raise NotAllowedToAccess(self.struct["result"]["message"])
class _ChatHub:
"""
Chat API
"""
def __init__(self, conversation) -> None:
self.wss = None
self.request: _ChatHubRequest
self.loop: bool
self.task: asyncio.Task
print(conversation.struct)
self.request = _ChatHubRequest(
conversation_signature=conversation.struct["conversationSignature"],
client_id=conversation.struct["clientId"],
conversation_id=conversation.struct["conversationId"],
)
async def ask_stream(
self,
prompt: str,
wss_link: str,
conversation_style: CONVERSATION_STYLE_TYPE = None,
raw: bool = False,
options: dict = None,
) -> Generator[str, None, None]:
"""
Ask a question to the bot
"""
if self.wss and not self.wss.closed:
await self.wss.close()
# Check if websocket is closed
self.wss = await websockets.connect(
wss_link,
extra_headers=HEADERS,
max_size=None,
ssl=get_ssl_context()
)
await self._initial_handshake()
# Construct a ChatHub request
self.request.update(
prompt=prompt,
conversation_style=conversation_style,
options=options,
)
# Send request
await self.wss.send(_append_identifier(self.request.struct))
final = False
while not final:
objects = str(await self.wss.recv()).split(DELIMITER)
for obj in objects:
if obj is None or not obj:
continue
response = json.loads(obj)
if response.get("type") != 2 and raw:
yield False, response
elif response.get("type") == 1 and response["arguments"][0].get(
"messages",
):
resp_txt = response["arguments"][0]["messages"][0]["adaptiveCards"][
0
]["body"][0].get("text")
yield False, resp_txt
elif response.get("type") == 2:
final = True
yield True, response
async def _initial_handshake(self) -> None:
await self.wss.send(_append_identifier({"protocol": "json", "version": 1}))
await self.wss.recv()
async def close(self) -> None:
"""
Close the connection
"""
if self.wss and not self.wss.closed:
await self.wss.close()
class NewbingChatbot:
"""
Combines everything to make it seamless
"""
def __init__(
self,
cookies,
proxy
) -> None:
if cookies is None:
cookies = {}
self.cookies = cookies
self.proxy = proxy
self.chat_hub: _ChatHub = _ChatHub(
_Conversation(self.cookies, self.proxy),
)
async def ask(
self,
prompt: str,
wss_link: str,
conversation_style: CONVERSATION_STYLE_TYPE = None,
options: dict = None,
) -> dict:
"""
Ask a question to the bot
"""
async for final, response in self.chat_hub.ask_stream(
prompt=prompt,
conversation_style=conversation_style,
wss_link=wss_link,
options=options,
):
if final:
return response
await self.chat_hub.wss.close()
return None
async def ask_stream(
self,
prompt: str,
wss_link: str,
conversation_style: CONVERSATION_STYLE_TYPE = None,
raw: bool = False,
options: dict = None,
) -> Generator[str, None, None]:
"""
Ask a question to the bot
"""
async for response in self.chat_hub.ask_stream(
prompt=prompt,
conversation_style=conversation_style,
wss_link=wss_link,
raw=raw,
options=options,
):
yield response
async def close(self) -> None:
"""
Close the connection
"""
await self.chat_hub.close()
async def reset(self) -> None:
"""
Reset the conversation
"""
await self.close()
self.chat_hub = _ChatHub(_Conversation(self.cookies, self.proxy))
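
For reference, the bridge consumed NewbingChatbot.ask_stream roughly as follows (an async sketch; the prompt is a placeholder and the wss_link value is the endpoint already used above):

import asyncio

async def demo(bot):
    # bot = NewbingChatbot(cookies=..., proxy=None), constructed elsewhere
    async for final, response in bot.ask_stream(
            prompt="hello",
            conversation_style="balanced",  # creative / balanced / precise
            wss_link="wss://sydney.bing.com/sydney/ChatHub"):
        if not final:
            print(response)                 # streamed partial text
        else:
            print("final packet received")  # full json payload arrives last

# asyncio.run(demo(bot))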


@ -10,7 +10,8 @@ def validate_path():
validate_path() # validate path so you can run from base directory
if __name__ == "__main__":
from request_llm.bridge_newbingfree import predict_no_ui_long_connection
from request_llm.bridge_chatglmft import predict_no_ui_long_connection
# from request_llm.bridge_newbingfree import predict_no_ui_long_connection
# from request_llm.bridge_moss import predict_no_ui_long_connection
# from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
# from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection
@ -27,52 +28,3 @@ if __name__ == "__main__":
sys_prompt="")
print('final result:', result)
result = predict_no_ui_long_connection(inputs="what is a hero?",
llm_kwargs=llm_kwargs,
history=["hello world"],
sys_prompt="")
print('final result:', result)
result = predict_no_ui_long_connection(inputs="如何理解传奇?",
llm_kwargs=llm_kwargs,
history=[],
sys_prompt="")
print('final result:', result)
# # print(result)
# from multiprocessing import Process, Pipe
# class GetGLMHandle(Process):
# def __init__(self):
# super().__init__(daemon=True)
# pass
# def run(self):
# # 子进程执行
# # 第一次运行,加载参数
# def validate_path():
# import os, sys
# dir_name = os.path.dirname(__file__)
# root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
# os.chdir(root_dir_assume + '/request_llm/jittorllms')
# sys.path.append(root_dir_assume + '/request_llm/jittorllms')
# validate_path() # validate path so you can run from base directory
# jittorllms_model = None
# import types
# try:
# if jittorllms_model is None:
# from models import get_model
# # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
# args_dict = {'model': 'chatrwkv'}
# print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
# jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
# print('done get model')
# except:
# # self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。')
# raise RuntimeError("不能正常加载jittorllms的参数")
# x = GetGLMHandle()
# x.start()
# input()


@ -1,4 +1,5 @@
./docs/gradio-3.32.2-py3-none-any.whl
pydantic==1.10.11
tiktoken>=0.3.3
requests[socks]
transformers

47
theme/common.js Normal file

@ -0,0 +1,47 @@
function ChatBotHeight() {
function update_height(){
var { panel_height_target, chatbot_height, chatbot } = get_elements();
if (panel_height_target!=chatbot_height)
{
var pixelString = panel_height_target.toString() + 'px';
chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
}
}
function update_height_slow(){
var { panel_height_target, chatbot_height, chatbot } = get_elements();
if (panel_height_target!=chatbot_height)
{
new_panel_height = (panel_height_target - chatbot_height)*0.5 + chatbot_height;
if (Math.abs(new_panel_height - panel_height_target) < 10){
new_panel_height = panel_height_target;
}
// console.log(chatbot_height, panel_height_target, new_panel_height);
var pixelString = new_panel_height.toString() + 'px';
chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
}
}
update_height();
setInterval(function() {
update_height_slow()
}, 50); // 每50毫秒执行一次
}
function get_elements() {
var chatbot = document.querySelector('#gpt-chatbot > div.wrap.svelte-18telvq');
if (!chatbot) {
chatbot = document.querySelector('#gpt-chatbot');
}
const panel1 = document.querySelector('#input-panel');
const panel2 = document.querySelector('#basic-panel');
const panel3 = document.querySelector('#plugin-panel');
const panel4 = document.querySelector('#interact-panel');
const panel5 = document.querySelector('#input-panel2');
const panel_active = document.querySelector('#state-panel');
var panel_height_target = (20-panel_active.offsetHeight) + panel1.offsetHeight + panel2.offsetHeight + panel3.offsetHeight + panel4.offsetHeight + panel5.offsetHeight + 21;
var panel_height_target = parseInt(panel_height_target);
var chatbot_height = chatbot.style.height;
var chatbot_height = parseInt(chatbot_height);
return { panel_height_target, chatbot_height, chatbot };
}
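
update_height_slow above eases the chatbot toward the target height by closing half of the remaining gap on each 50 ms tick and snapping once it is within 10 px. The same convergence, checked in a few lines of Python:

height, target = 300.0, 500.0
while height != target:
    height = (target - height) * 0.5 + height  # halve the remaining gap
    if abs(height - target) < 10:
        height = target                        # snap, as the JS does
print(height)  # 500.0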


@ -1,108 +1,3 @@
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU')
# gradio可用颜色列表
# gr.themes.utils.colors.slate (石板色)
# gr.themes.utils.colors.gray (灰色)
# gr.themes.utils.colors.zinc (锌色)
# gr.themes.utils.colors.neutral (中性色)
# gr.themes.utils.colors.stone (石头色)
# gr.themes.utils.colors.red (红色)
# gr.themes.utils.colors.orange (橙色)
# gr.themes.utils.colors.amber (琥珀色)
# gr.themes.utils.colors.yellow (黄色)
# gr.themes.utils.colors.lime (酸橙色)
# gr.themes.utils.colors.green (绿色)
# gr.themes.utils.colors.emerald (祖母绿)
# gr.themes.utils.colors.teal (青蓝色)
# gr.themes.utils.colors.cyan (青色)
# gr.themes.utils.colors.sky (天蓝色)
# gr.themes.utils.colors.blue (蓝色)
# gr.themes.utils.colors.indigo (靛蓝色)
# gr.themes.utils.colors.violet (紫罗兰色)
# gr.themes.utils.colors.purple (紫色)
# gr.themes.utils.colors.fuchsia (洋红色)
# gr.themes.utils.colors.pink (粉红色)
# gr.themes.utils.colors.rose (玫瑰色)
def adjust_theme():
try:
color_er = gr.themes.utils.colors.fuchsia
set_theme = gr.themes.Default(
primary_hue=gr.themes.utils.colors.orange,
neutral_hue=gr.themes.utils.colors.gray,
font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
"sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
set_theme.set(
# Colors
input_background_fill_dark="*neutral_800",
# Transition
button_transition="none",
# Shadows
button_shadow="*shadow_drop",
button_shadow_hover="*shadow_drop_lg",
button_shadow_active="*shadow_inset",
input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
checkbox_label_shadow="*shadow_drop",
block_shadow="*shadow_drop",
form_gap_width="1px",
# Button borders
input_border_width="1px",
input_background_fill="white",
# Gradients
stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
error_background_fill_dark="*background_fill_primary",
checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
button_primary_border_color_dark="*primary_500",
button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
button_cancel_border_color=color_er.c200,
button_cancel_border_color_dark=color_er.c600,
button_cancel_text_color=color_er.c600,
button_cancel_text_color_dark="white",
)
# 添加一个萌萌的看板娘
if ADD_WAIFU:
js = """
<script src="file=docs/waifu_plugin/jquery.min.js"></script>
<script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
<script src="file=docs/waifu_plugin/autoload.js"></script>
"""
gradio_original_template_fn = gr.routes.templates.TemplateResponse
def gradio_new_template_fn(*args, **kwargs):
res = gradio_original_template_fn(*args, **kwargs)
res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
res.init_headers()
return res
gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
except:
set_theme = None
print('gradio版本较旧, 不能自定义字体和颜色')
return set_theme
advanced_css = """
.markdown-body table {
margin: 1em 0;
border-collapse: collapse;
@ -187,10 +82,13 @@ advanced_css = """
margin: 1em 2em 1em 0.5em;
}
"""
if CODE_HIGHLIGHT:
advanced_css += """
.app.svelte-1mya07g.svelte-1mya07g {
max-width: 95%;
position: relative;
padding: var(--size-4);
width: 95%;
height: 100%;
}
.codehilite .hll { background-color: #6e7681 }
.codehilite .c { color: #8b949e; font-style: italic } /* Comment */
@ -350,4 +248,3 @@ if CODE_HIGHLIGHT:
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */
"""

87
theme/default.py Normal file

@ -0,0 +1,87 @@
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')
def adjust_theme():
try:
color_er = gr.themes.utils.colors.fuchsia
set_theme = gr.themes.Default(
primary_hue=gr.themes.utils.colors.orange,
neutral_hue=gr.themes.utils.colors.gray,
font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
"sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
set_theme.set(
# Colors
input_background_fill_dark="*neutral_800",
# Transition
button_transition="none",
# Shadows
button_shadow="*shadow_drop",
button_shadow_hover="*shadow_drop_lg",
button_shadow_active="*shadow_inset",
input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
checkbox_label_shadow="*shadow_drop",
block_shadow="*shadow_drop",
form_gap_width="1px",
# Button borders
input_border_width="1px",
input_background_fill="white",
# Gradients
stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
error_background_fill_dark="*background_fill_primary",
checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
button_primary_border_color_dark="*primary_500",
button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
button_cancel_border_color=color_er.c200,
button_cancel_border_color_dark=color_er.c600,
button_cancel_text_color=color_er.c600,
button_cancel_text_color_dark="white",
)
if LAYOUT=="TOP-DOWN":
js = ""
else:
with open('theme/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
# 添加一个萌萌的看板娘
if ADD_WAIFU:
js += """
<script src="file=docs/waifu_plugin/jquery.min.js"></script>
<script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
<script src="file=docs/waifu_plugin/autoload.js"></script>
"""
gradio_original_template_fn = gr.routes.templates.TemplateResponse
def gradio_new_template_fn(*args, **kwargs):
res = gradio_original_template_fn(*args, **kwargs)
res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
res.init_headers()
return res
gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
except:
set_theme = None
print('gradio版本较旧, 不能自定义字体和颜色')
return set_theme
with open("theme/default.css", "r", encoding="utf-8") as f:
advanced_css = f.read()

806
theme/green.css Normal file

@ -0,0 +1,806 @@
:root {
--chatbot-color-light: #000000;
--chatbot-color-dark: #FFFFFF;
--chatbot-background-color-light: #F3F3F3;
--chatbot-background-color-dark: #121111;
--message-user-background-color-light: #95EC69;
--message-user-background-color-dark: #26B561;
--message-bot-background-color-light: #FFFFFF;
--message-bot-background-color-dark: #2C2C2C;
}
mspace {
display: block;
}
@media only screen and (max-width: 767px) {
#column_1 {
display: none !important;
}
}
@keyframes highlight {
0%, 100% {
border: 2px solid transparent;
}
50% {
border-color: yellow;
}
}
#highlight_update {
animation-name: highlight;
animation-duration: 0.75s;
animation-iteration-count: 3;
}
.table-wrap.svelte-13hsdno.svelte-13hsdno.svelte-13hsdno {
border: 0px solid var(--border-color-primary) !important;
}
#examples_col {
z-index: 2;
position: absolute;
bottom: 0;
left: 0;
width: 100%;
margin-bottom: 30% !important;
}
#hide_examples {
z-index: 0;
}
#debug_mes {
position: absolute;
display: flex;
bottom: 0;
left: 0;
z-index: 1; /* 设置更高的 z-index 值 */
margin-bottom: -4px !important;
align-self: flex-end;
}
#chat_box {
display: flex;
flex-direction: column;
overflow-y: visible !important;
z-index: 3;
flex-grow: 1; /* 自动填充剩余空间 */
position: absolute;
bottom: 0;
left: 0;
width: 100%;
margin-bottom: 30px !important;
border: 1px solid var(--border-color-primary);
}
.toast-body {
z-index: 5 !important;
}
.chat_input {
}
.sm_btn {
position: relative;
bottom: 5px;
height: 10%;
border-radius: 20px!important;
min-width: min(10%,100%) !important;
overflow: hidden;
}
.sm_select {
position: relative !important;
z-index: 5 !important;
bottom: 5px;
min-width: min(20%,100%) !important;
border-radius: 20px!important;
}
.sm_checkbox {
position: relative !important;
z-index: 5 !important;
bottom: 5px;
padding: 0 !important;
}
.sm_select .wrap-inner.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e {
padding: 0 !important;
}
.sm_select .block.svelte-mppz8v {
width: 10% !important;
}
/* usage_display */
.insert_block {
position: relative;
bottom: 2px;
min-width: min(55px,100%) !important;
}
.submit_btn {
flex-direction: column-reverse;
overflow-y: auto !important;
position: absolute;
bottom: 0;
right: 10px;
margin-bottom: 10px !important;
min-width: min(50px,100%) !important;
}
textarea {
resize: none;
height: 100%; /* 填充父元素的高度 */
}
#main_chatbot {
height: 75vh !important;
max-height: 75vh !important;
/* overflow: auto !important; */
z-index: 2;
transform: translateZ(0) !important;
backface-visibility: hidden !important;
will-change: transform !important;
}
#prompt_result{
height: 60vh !important;
max-height: 60vh !important;
}
#app_title {
font-weight: var(--prose-header-text-weight);
font-size: var(--text-xxl);
line-height: 1.3;
text-align: left;
margin-top: 6px;
white-space: nowrap;
}
#description {
text-align: center;
margin: 32px 0 4px 0;
}
/* gradio的页脚信息 */
footer {
/* display: none !important; */
margin-top: .2em !important;
font-size: 85%;
}
#footer {
text-align: center;
}
#footer div {
display: inline-block;
}
#footer .versions{
font-size: 85%;
opacity: 0.60;
}
/* user_info */
#float_display {
position: absolute;
max-height: 30px;
}
/* user_info */
#user_info {
white-space: nowrap;
position: absolute; left: 8em; top: .2em;
z-index: var(--layer-2);
box-shadow: var(--block-shadow);
border: none; border-radius: var(--block-label-radius);
background: var(--color-accent);
padding: var(--block-label-padding);
font-size: var(--block-label-text-size); line-height: var(--line-sm);
width: auto; min-height: 30px !important;
opacity: 1;
transition: opacity 0.3s ease-in-out;
}
textarea.svelte-1pie7s6 {
background: #e7e6e6 !important;
width: 96% !important;
}
.dark textarea.svelte-1pie7s6 {
background: var(--input-background-fill) !important;
width: 96% !important;
}
.dark input[type=number].svelte-1cl284s {
background: #393939 !important;
border: var(--input-border-width) solid var(--input-border-color) !important;
}
.dark input[type="range"] {
background: #393939 !important;
}
#user_info .wrap {
opacity: 0;
}
#user_info p {
color: white;
font-weight: var(--block-label-text-weight);
}
#user_info.hideK {
opacity: 0;
transition: opacity 1s ease-in-out;
}
[class *= "message"] {
gap: 7px !important;
border-radius: var(--radius-xl) !important
}
/* debug_mes */
#debug_mes {
min-height: 2em;
align-items: flex-end;
justify-content: flex-end;
}
#debug_mes p {
font-size: .85em;
font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace;
/* Windows下中文的monospace会fallback为新宋体实在太丑这里折中使用微软雅黑 */
color: #000000;
}
.dark #debug_mes p {
color: #ee65ed;
}
#debug_mes {
transition: all 0.6s;
}
#main_chatbot {
transition: height 0.3s ease;
}
/* .wrap.svelte-18telvq.svelte-18telvq {
padding: var(--block-padding) !important;
height: 100% !important;
max-height: 95% !important;
overflow-y: auto !important;
}*/
.app.svelte-1mya07g.svelte-1mya07g {
max-width: 100%;
position: relative;
padding: var(--size-4);
width: 100%;
height: 100%;
}
.gradio-container-3-32-2 h1 {
font-weight: 700 !important;
font-size: 28px !important;
}
.gradio-container-3-32-2 h2 {
font-weight: 600 !important;
font-size: 24px !important;
}
.gradio-container-3-32-2 h3 {
font-weight: 500 !important;
font-size: 20px !important;
}
.gradio-container-3-32-2 h4 {
font-weight: 400 !important;
font-size: 16px !important;
}
.gradio-container-3-32-2 h5 {
font-weight: 300 !important;
font-size: 14px !important;
}
.gradio-container-3-32-2 h6 {
font-weight: 200 !important;
font-size: 12px !important;
}
#usage_display p, #usage_display span {
margin: 0;
font-size: .85em;
color: var(--body-text-color-subdued);
}
.progress-bar {
background-color: var(--input-background-fill);
margin: .5em 0 !important;
height: 20px;
border-radius: 10px;
overflow: hidden;
}
.progress {
background-color: var(--block-title-background-fill);
height: 100%;
border-radius: 10px;
text-align: right;
transition: width 0.5s ease-in-out;
}
.progress-text {
/* color: white; */
color: var(--color-accent) !important;
font-size: 1em !important;
font-weight: bold;
padding-right: 10px;
line-height: 20px;
}
.apSwitch {
top: 2px;
display: inline-block;
height: 24px;
position: relative;
width: 48px;
border-radius: 12px;
}
.apSwitch input {
display: none !important;
}
.apSlider {
background-color: var(--neutral-200);
bottom: 0;
cursor: pointer;
left: 0;
position: absolute;
right: 0;
top: 0;
transition: .4s;
font-size: 18px;
border-radius: 7px;
}
.apSlider::before {
bottom: -1.5px;
left: 1px;
position: absolute;
transition: .4s;
content: "🌞";
}
hr.append-display {
margin: 8px 0;
border: none;
height: 1px;
border-top-width: 0;
background-image: linear-gradient(to right, rgba(50,50,50, 0.1), rgba(150, 150, 150, 0.8), rgba(50,50,50, 0.1));
}
.source-a {
font-size: 0.8em;
max-width: 100%;
margin: 0;
display: flex;
flex-direction: row;
flex-wrap: wrap;
align-items: center;
/* background-color: #dddddd88; */
border-radius: 1.5rem;
padding: 0.2em;
}
.source-a a {
display: inline-block;
background-color: #aaaaaa50;
border-radius: 1rem;
padding: 0.5em;
text-align: center;
text-overflow: ellipsis;
overflow: hidden;
min-width: 20%;
white-space: nowrap;
margin: 0.2rem 0.1rem;
text-decoration: none !important;
flex: 1;
transition: flex 0.5s;
}
.source-a a:hover {
background-color: #aaaaaa20;
flex: 2;
}
input:checked + .apSlider {
background-color: var(--primary-600);
}
input:checked + .apSlider::before {
transform: translateX(23px);
content:"🌚";
}
/* Override Slider Styles (for webkit browsers like Safari and Chrome)
* 好希望这份提案能早日实现 https://github.com/w3c/csswg-drafts/issues/4410
* 进度滑块在各个平台还是太不统一了
*/
input[type="range"] {
-webkit-appearance: none;
height: 4px;
background: var(--input-background-fill);
border-radius: 5px;
background-image: linear-gradient(var(--primary-500),var(--primary-500));
background-size: 0% 100%;
background-repeat: no-repeat;
}
input[type="range"]::-webkit-slider-thumb {
-webkit-appearance: none;
height: 20px;
width: 20px;
border-radius: 50%;
border: solid 0.5px #ddd;
background-color: white;
cursor: ew-resize;
box-shadow: var(--input-shadow);
transition: background-color .1s ease;
}
input[type="range"]::-webkit-slider-thumb:hover {
background: var(--neutral-50);
}
input[type=range]::-webkit-slider-runnable-track {
-webkit-appearance: none;
box-shadow: none;
border: none;
background: transparent;
}
.submit_btn, #cancel_btn {
height: 42px !important;
}
.submit_btn::before {
content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E");
height: 21px;
}
#cancel_btn::before {
content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E");
height: 21px;
}
/* list */
ol:not(.options), ul:not(.options) {
padding-inline-start: 2em !important;
}
/* 亮色(默认) */
#main_chatbot {
background-color: var(--chatbot-background-color-light) !important;
color: var(--chatbot-color-light) !important;
}
/* 暗色 */
.dark #main_chatbot {
background-color: var(--block-background-fill) !important;
color: var(--chatbot-color-dark) !important;
}
/* 屏幕宽度大于等于500px的设备 */
/* update on 2023.4.8: 高度的细致调整已写入JavaScript */
@media screen and (min-width: 500px) {
#main_chatbot {
height: calc(100vh - 200px);
}
#main_chatbot .wrap {
max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
}
}
/* 屏幕宽度小于500px的设备 */
@media screen and (max-width: 499px) {
#main_chatbot {
height: calc(100vh - 140px);
}
#main_chatbot .wrap {
max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
}
[data-testid = "bot"] {
max-width: 95% !important;
}
#app_title h1{
letter-spacing: -1px; font-size: 22px;
}
}
#main_chatbot .wrap {
overflow-x: hidden
}
/* 对话气泡 */
.message {
border-radius: var(--radius-xl) !important;
border: none;
padding: var(--spacing-xl) !important;
font-size: 15px !important;
line-height: var(--line-md) !important;
min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
}
[data-testid = "bot"] {
max-width: 85%;
border-bottom-left-radius: 0 !important;
}
[data-testid = "user"] {
max-width: 85%;
width: auto !important;
border-bottom-right-radius: 0 !important;
}
.message p {
margin-top: 0.6em !important;
margin-bottom: 0.6em !important;
}
.message p:first-child { margin-top: 0 !important; }
.message p:last-of-type { margin-bottom: 0 !important; }
.message .md-message {
display: block;
padding: 0 !important;
}
.message .raw-message {
display: block;
padding: 0 !important;
white-space: pre-wrap;
}
.raw-message.hideM, .md-message.hideM {
display: none;
}
/* custom buttons */
.chuanhu-btn {
border-radius: 5px;
/* background-color: #E6E6E6 !important; */
color: rgba(120, 120, 120, 0.64) !important;
padding: 4px !important;
position: absolute;
right: -22px;
cursor: pointer !important;
transition: color .2s ease, background-color .2s ease;
}
.chuanhu-btn:hover {
background-color: rgba(167, 167, 167, 0.25) !important;
color: unset !important;
}
.chuanhu-btn:active {
background-color: rgba(167, 167, 167, 0.5) !important;
}
.chuanhu-btn:focus {
outline: none;
}
.copy-bot-btn {
/* top: 18px; */
bottom: 0;
}
.toggle-md-btn {
/* top: 0; */
bottom: 20px;
}
.copy-code-btn {
position: relative;
float: right;
font-size: 1em;
cursor: pointer;
}
.message-wrap>div img{
border-radius: 10px !important;
}
/* history message */
.wrap>.history-message {
padding: 10px !important;
}
.history-message {
/* padding: 0 !important; */
opacity: 80%;
display: flex;
flex-direction: column;
}
.history-message>.history-message {
padding: 0 !important;
}
.history-message>.message-wrap {
padding: 0 !important;
margin-bottom: 16px;
}
.history-message>.message {
margin-bottom: 16px;
}
.wrap>.history-message::after {
content: "";
display: block;
height: 2px;
background-color: var(--body-text-color-subdued);
margin-bottom: 10px;
margin-top: -10px;
clear: both;
}
.wrap>.history-message>:last-child::after {
content: "仅供查看";
display: block;
text-align: center;
color: var(--body-text-color-subdued);
font-size: 0.8em;
}
/* 表格 */
table {
margin: 1em 0;
border-collapse: collapse;
empty-cells: show;
}
td,th {
border: 1.2px solid var(--border-color-primary) !important;
padding: 0.2em;
}
thead {
background-color: rgba(175,184,193,0.2);
}
thead th {
padding: .5em .2em;
}
/* 行内代码 */
.message :not(pre) code {
display: inline;
white-space: break-spaces;
border-radius: 6px;
margin: 0 2px 0 2px;
padding: .2em .4em .1em .4em;
background-color: rgba(175,184,193,0.2);
}
/* 代码块 */
.message pre code {
display: block;
overflow: auto;
white-space: pre;
background-color: hsla(0, 0%, 7%, 70%)!important;
border-radius: 10px;
padding: 1.2em 1em 0em .5em;
margin: 0.6em 2em 1em 0.2em;
color: #FFF;
box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
}
.dark .message pre code {
background-color: hsla(0, 0%, 20%, 300%)!important;
}
.message pre {
padding: 0 !important;
}
.message pre code div.highlight {
background-color: unset !important;
}
button.copy-button {
display: none;
}
/* 代码高亮样式 */
.codehilite .hll { background-color: #6e7681 }
.codehilite .c { color: #8b949e; font-style: italic } /* Comment */
.codehilite .err { color: #f85149 } /* Error */
.codehilite .esc { color: #c9d1d9 } /* Escape */
.codehilite .g { color: #c9d1d9 } /* Generic */
.codehilite .k { color: #ff7b72 } /* Keyword */
.codehilite .l { color: #a5d6ff } /* Literal */
.codehilite .n { color: #c9d1d9 } /* Name */
.codehilite .o { color: #ff7b72; font-weight: bold } /* Operator */
.codehilite .x { color: #c9d1d9 } /* Other */
.codehilite .p { color: #c9d1d9 } /* Punctuation */
.codehilite .ch { color: #8b949e; font-style: italic } /* Comment.Hashbang */
.codehilite .cm { color: #8b949e; font-style: italic } /* Comment.Multiline */
.codehilite .cp { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Preproc */
.codehilite .cpf { color: #8b949e; font-style: italic } /* Comment.PreprocFile */
.codehilite .c1 { color: #8b949e; font-style: italic } /* Comment.Single */
.codehilite .cs { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Special */
.codehilite .gd { color: #ffa198; background-color: #490202 } /* Generic.Deleted */
.codehilite .ge { color: #c9d1d9; font-style: italic } /* Generic.Emph */
.codehilite .gr { color: #ffa198 } /* Generic.Error */
.codehilite .gh { color: #79c0ff; font-weight: bold } /* Generic.Heading */
.codehilite .gi { color: #56d364; background-color: #0f5323 } /* Generic.Inserted */
.codehilite .go { color: #8b949e } /* Generic.Output */
.codehilite .gp { color: #8b949e } /* Generic.Prompt */
.codehilite .gs { color: #c9d1d9; font-weight: bold } /* Generic.Strong */
.codehilite .gu { color: #79c0ff } /* Generic.Subheading */
.codehilite .gt { color: #ff7b72 } /* Generic.Traceback */
.codehilite .g-Underline { color: #c9d1d9; text-decoration: underline } /* Generic.Underline */
.codehilite .kc { color: #79c0ff } /* Keyword.Constant */
.codehilite .kd { color: #ff7b72 } /* Keyword.Declaration */
.codehilite .kn { color: #ff7b72 } /* Keyword.Namespace */
.codehilite .kp { color: #79c0ff } /* Keyword.Pseudo */
.codehilite .kr { color: #ff7b72 } /* Keyword.Reserved */
.codehilite .kt { color: #ff7b72 } /* Keyword.Type */
.codehilite .ld { color: #79c0ff } /* Literal.Date */
.codehilite .m { color: #a5d6ff } /* Literal.Number */
.codehilite .s { color: #a5d6ff } /* Literal.String */
.codehilite .na { color: #c9d1d9 } /* Name.Attribute */
.codehilite .nb { color: #c9d1d9 } /* Name.Builtin */
.codehilite .nc { color: #f0883e; font-weight: bold } /* Name.Class */
.codehilite .no { color: #79c0ff; font-weight: bold } /* Name.Constant */
.codehilite .nd { color: #d2a8ff; font-weight: bold } /* Name.Decorator */
.codehilite .ni { color: #ffa657 } /* Name.Entity */
.codehilite .ne { color: #f0883e; font-weight: bold } /* Name.Exception */
.codehilite .nf { color: #d2a8ff; font-weight: bold } /* Name.Function */
.codehilite .nl { color: #79c0ff; font-weight: bold } /* Name.Label */
.codehilite .nn { color: #ff7b72 } /* Name.Namespace */
.codehilite .nx { color: #c9d1d9 } /* Name.Other */
.codehilite .py { color: #79c0ff } /* Name.Property */
.codehilite .nt { color: #7ee787 } /* Name.Tag */
.codehilite .nv { color: #79c0ff } /* Name.Variable */
.codehilite .ow { color: #ff7b72; font-weight: bold } /* Operator.Word */
.codehilite .pm { color: #c9d1d9 } /* Punctuation.Marker */
.codehilite .w { color: #6e7681 } /* Text.Whitespace */
.codehilite .mb { color: #a5d6ff } /* Literal.Number.Bin */
.codehilite .mf { color: #a5d6ff } /* Literal.Number.Float */
.codehilite .mh { color: #a5d6ff } /* Literal.Number.Hex */
.codehilite .mi { color: #a5d6ff } /* Literal.Number.Integer */
.codehilite .mo { color: #a5d6ff } /* Literal.Number.Oct */
.codehilite .sa { color: #79c0ff } /* Literal.String.Affix */
.codehilite .sb { color: #a5d6ff } /* Literal.String.Backtick */
.codehilite .sc { color: #a5d6ff } /* Literal.String.Char */
.codehilite .dl { color: #79c0ff } /* Literal.String.Delimiter */
.codehilite .sd { color: #a5d6ff } /* Literal.String.Doc */
.codehilite .s2 { color: #a5d6ff } /* Literal.String.Double */
.codehilite .se { color: #79c0ff } /* Literal.String.Escape */
.codehilite .sh { color: #79c0ff } /* Literal.String.Heredoc */
.codehilite .si { color: #a5d6ff } /* Literal.String.Interpol */
.codehilite .sx { color: #a5d6ff } /* Literal.String.Other */
.codehilite .sr { color: #79c0ff } /* Literal.String.Regex */
.codehilite .s1 { color: #a5d6ff } /* Literal.String.Single */
.codehilite .ss { color: #a5d6ff } /* Literal.String.Symbol */
.codehilite .bp { color: #c9d1d9 } /* Name.Builtin.Pseudo */
.codehilite .fm { color: #d2a8ff; font-weight: bold } /* Name.Function.Magic */
.codehilite .vc { color: #79c0ff } /* Name.Variable.Class */
.codehilite .vg { color: #79c0ff } /* Name.Variable.Global */
.codehilite .vi { color: #79c0ff } /* Name.Variable.Instance */
.codehilite .vm { color: #79c0ff } /* Name.Variable.Magic */
.codehilite .il { color: #a5d6ff } /* Literal.Number.Integer.Long */
.dark .codehilite .hll { background-color: #2C3B41 }
.dark .codehilite .c { color: #79d618; font-style: italic } /* Comment */
.dark .codehilite .err { color: #FF5370 } /* Error */
.dark .codehilite .esc { color: #89DDFF } /* Escape */
.dark .codehilite .g { color: #EEFFFF } /* Generic */
.dark .codehilite .k { color: #BB80B3 } /* Keyword */
.dark .codehilite .l { color: #C3E88D } /* Literal */
.dark .codehilite .n { color: #EEFFFF } /* Name */
.dark .codehilite .o { color: #89DDFF } /* Operator */
.dark .codehilite .p { color: #89DDFF } /* Punctuation */
.dark .codehilite .ch { color: #79d618; font-style: italic } /* Comment.Hashbang */
.dark .codehilite .cm { color: #79d618; font-style: italic } /* Comment.Multiline */
.dark .codehilite .cp { color: #79d618; font-style: italic } /* Comment.Preproc */
.dark .codehilite .cpf { color: #79d618; font-style: italic } /* Comment.PreprocFile */
.dark .codehilite .c1 { color: #79d618; font-style: italic } /* Comment.Single */
.dark .codehilite .cs { color: #79d618; font-style: italic } /* Comment.Special */
.dark .codehilite .gd { color: #FF5370 } /* Generic.Deleted */
.dark .codehilite .ge { color: #89DDFF } /* Generic.Emph */
.dark .codehilite .gr { color: #FF5370 } /* Generic.Error */
.dark .codehilite .gh { color: #C3E88D } /* Generic.Heading */
.dark .codehilite .gi { color: #C3E88D } /* Generic.Inserted */
.dark .codehilite .go { color: #79d618 } /* Generic.Output */
.dark .codehilite .gp { color: #FFCB6B } /* Generic.Prompt */
.dark .codehilite .gs { color: #FF5370 } /* Generic.Strong */
.dark .codehilite .gu { color: #89DDFF } /* Generic.Subheading */
.dark .codehilite .gt { color: #FF5370 } /* Generic.Traceback */
.dark .codehilite .kc { color: #89DDFF } /* Keyword.Constant */
.dark .codehilite .kd { color: #BB80B3 } /* Keyword.Declaration */
.dark .codehilite .kn { color: #89DDFF; font-style: italic } /* Keyword.Namespace */
.dark .codehilite .kp { color: #89DDFF } /* Keyword.Pseudo */
.dark .codehilite .kr { color: #BB80B3 } /* Keyword.Reserved */
.dark .codehilite .kt { color: #BB80B3 } /* Keyword.Type */
.dark .codehilite .ld { color: #C3E88D } /* Literal.Date */
.dark .codehilite .m { color: #F78C6C } /* Literal.Number */
.dark .codehilite .s { color: #C3E88D } /* Literal.String */
.dark .codehilite .na { color: #BB80B3 } /* Name.Attribute */
.dark .codehilite .nb { color: #82AAFF } /* Name.Builtin */
.dark .codehilite .nc { color: #FFCB6B } /* Name.Class */
.dark .codehilite .no { color: #EEFFFF } /* Name.Constant */
.dark .codehilite .nd { color: #82AAFF } /* Name.Decorator */
.dark .codehilite .ni { color: #89DDFF } /* Name.Entity */
.dark .codehilite .ne { color: #FFCB6B } /* Name.Exception */
.dark .codehilite .nf { color: #82AAFF } /* Name.Function */
.dark .codehilite .nl { color: #82AAFF } /* Name.Label */
.dark .codehilite .nn { color: #FFCB6B } /* Name.Namespace */
.dark .codehilite .nx { color: #EEFFFF } /* Name.Other */
.dark .codehilite .py { color: #FFCB6B } /* Name.Property */
.dark .codehilite .nt { color: #FF5370 } /* Name.Tag */
.dark .codehilite .nv { color: #89DDFF } /* Name.Variable */
.dark .codehilite .ow { color: #89DDFF; font-style: italic } /* Operator.Word */
.dark .codehilite .pm { color: #89DDFF } /* Punctuation.Marker */
.dark .codehilite .w { color: #EEFFFF } /* Text.Whitespace */
.dark .codehilite .mb { color: #F78C6C } /* Literal.Number.Bin */
.dark .codehilite .mf { color: #F78C6C } /* Literal.Number.Float */
.dark .codehilite .mh { color: #F78C6C } /* Literal.Number.Hex */
.dark .codehilite .mi { color: #F78C6C } /* Literal.Number.Integer */
.dark .codehilite .mo { color: #F78C6C } /* Literal.Number.Oct */
.dark .codehilite .sa { color: #BB80B3 } /* Literal.String.Affix */
.dark .codehilite .sb { color: #C3E88D } /* Literal.String.Backtick */
.dark .codehilite .sc { color: #C3E88D } /* Literal.String.Char */
.dark .codehilite .dl { color: #EEFFFF } /* Literal.String.Delimiter */
.dark .codehilite .sd { color: #79d618; font-style: italic } /* Literal.String.Doc */
.dark .codehilite .s2 { color: #C3E88D } /* Literal.String.Double */
.dark .codehilite .se { color: #EEFFFF } /* Literal.String.Escape */
.dark .codehilite .sh { color: #C3E88D } /* Literal.String.Heredoc */
.dark .codehilite .si { color: #89DDFF } /* Literal.String.Interpol */
.dark .codehilite .sx { color: #C3E88D } /* Literal.String.Other */
.dark .codehilite .sr { color: #89DDFF } /* Literal.String.Regex */
.dark .codehilite .s1 { color: #C3E88D } /* Literal.String.Single */
.dark .codehilite .ss { color: #89DDFF } /* Literal.String.Symbol */
.dark .codehilite .bp { color: #89DDFF } /* Name.Builtin.Pseudo */
.dark .codehilite .fm { color: #82AAFF } /* Name.Function.Magic */
.dark .codehilite .vc { color: #89DDFF } /* Name.Variable.Class */
.dark .codehilite .vg { color: #89DDFF } /* Name.Variable.Global */
.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */

104
theme/green.py Normal file

@@ -0,0 +1,104 @@
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')
def adjust_theme():
try:
set_theme = gr.themes.Soft(
primary_hue=gr.themes.Color(
c50="#EBFAF2",
c100="#CFF3E1",
c200="#A8EAC8",
c300="#77DEA9",
c400="#3FD086",
c500="#02C160",
c600="#06AE56",
c700="#05974E",
c800="#057F45",
c900="#04673D",
c950="#2E5541",
name="small_and_beautiful",
),
secondary_hue=gr.themes.Color(
c50="#576b95",
c100="#576b95",
c200="#576b95",
c300="#576b95",
c400="#576b95",
c500="#576b95",
c600="#576b95",
c700="#576b95",
c800="#576b95",
c900="#576b95",
c950="#576b95",
),
neutral_hue=gr.themes.Color(
name="gray",
c50="#f6f7f8",
# c100="#f3f4f6",
c100="#F2F2F2",
c200="#e5e7eb",
c300="#d1d5db",
c400="#B2B2B2",
c500="#808080",
c600="#636363",
c700="#515151",
c800="#393939",
# c900="#272727",
c900="#2B2B2B",
c950="#171717",
),
radius_size=gr.themes.sizes.radius_sm,
).set(
button_primary_background_fill="*primary_500",
button_primary_background_fill_dark="*primary_600",
button_primary_background_fill_hover="*primary_400",
button_primary_border_color="*primary_500",
button_primary_border_color_dark="*primary_600",
button_primary_text_color="white",
button_primary_text_color_dark="white",
button_secondary_background_fill="*neutral_100",
button_secondary_background_fill_hover="*neutral_50",
button_secondary_background_fill_dark="*neutral_900",
button_secondary_text_color="*neutral_800",
button_secondary_text_color_dark="white",
background_fill_primary="#F7F7F7",
background_fill_primary_dark="#1F1F1F",
block_title_text_color="*primary_500",
block_title_background_fill_dark="*primary_900",
block_label_background_fill_dark="*primary_900",
input_background_fill="#F6F6F6",
chatbot_code_background_color="*neutral_950",
chatbot_code_background_color_dark="*neutral_950",
)
js = ''
if LAYOUT=="TOP-DOWN":
js = ""
else:
with open('theme/common.js', 'r', encoding='utf8') as f:
js = f"<script>{f.read()}</script>"
# Add a cute Live2D mascot
if ADD_WAIFU:
js += """
<script src="file=docs/waifu_plugin/jquery.min.js"></script>
<script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
<script src="file=docs/waifu_plugin/autoload.js"></script>
"""
gradio_original_template_fn = gr.routes.templates.TemplateResponse
def gradio_new_template_fn(*args, **kwargs):
res = gradio_original_template_fn(*args, **kwargs)
res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
res.init_headers()
return res
gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
except:
set_theme = None
print('gradio版本较旧, 不能自定义字体和颜色')
return set_theme
with open("theme/green.css", "r", encoding="utf-8") as f:
advanced_css = f.read()
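
adjust_theme() only builds and returns the Gradio theme object (or None on old gradio versions), and advanced_css is read from theme/green.css at import time; wiring both into the UI is left to the caller. Below is a minimal, hedged sketch of that wiring, assuming it is run from the repository root so the relative theme/green.css and theme/common.js paths resolve; the Blocks layout shown is illustrative and not part of this diff.

```python
# Minimal sketch (assumed usage, not part of the diff): apply the green theme.
import gradio as gr
from theme.green import adjust_theme, advanced_css

set_theme = adjust_theme()  # may be None if the installed gradio is too old

with gr.Blocks(theme=set_theme, css=advanced_css, analytics_enabled=False) as demo:
    chatbot = gr.Chatbot()           # inherits the .codehilite styles from green.css
    txt = gr.Textbox(label="Input")

demo.queue().launch()
```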

12
theme/theme.py Normal file

@@ -0,0 +1,12 @@
import gradio as gr
from toolbox import get_conf
THEME, = get_conf('THEME')
if THEME == 'Chuanhu-Small-and-Beautiful':
from .green import adjust_theme, advanced_css
theme_declaration = "<h2 align=\"center\" class=\"small\">[Chuanhu-Small-and-Beautiful主题]</h2>"
else:
from .default import adjust_theme, advanced_css
theme_declaration = ""

toolbox.py

@@ -1,11 +1,13 @@
import markdown
import importlib
import traceback
import time
import inspect
import re
import os
import gradio
from latex2mathml.converter import convert as tex2mathml
from functools import wraps, lru_cache
pj = os.path.join
"""
========================================================================
@@ -39,7 +41,7 @@ def ArgsGeneralWrapper(f):
"""
Decorator function that reorganizes the input arguments, changing their order and structure.
"""
def decorated(cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args):
def decorated(request: gradio.Request, cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args):
txt_passon = txt
if txt == "" and txt2 != "": txt_passon = txt2
# Bring in a chatbot that carries cookies
@@ -53,13 +55,21 @@ def ArgsGeneralWrapper(f):
'top_p':top_p,
'max_length': max_length,
'temperature':temperature,
'client_ip': request.client.host,
}
plugin_kwargs = {
"advanced_arg": plugin_advanced_arg,
}
chatbot_with_cookie = ChatBotWithCookies(cookies)
chatbot_with_cookie.write_list(chatbot)
if cookies.get('lock_plugin', None) is None:
# Normal state
yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
else:
# Handle the locked state used by a few special plugins
module, fn_name = cookies['lock_plugin'].split('->')
f_hot_reload = getattr(importlib.import_module(module, fn_name), fn_name)
yield from f_hot_reload(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
return decorated
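
The lock_plugin branch above changes the dispatch rule: while cookies['lock_plugin'] holds a "module->function" string, every subsequent submission is routed straight into that function instead of the normal handler. A hedged sketch of a plugin using this convention follows; the module path, messages, and the 'exit' keyword are invented for illustration only.

```python
# Hypothetical interactive plugin illustrating the "module->function" lock
# convention consumed by ArgsGeneralWrapper above (paths/strings are made up).
from toolbox import update_ui

def interactive_demo(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
    cookies = chatbot.get_cookies()
    if txt.strip() == "exit":
        cookies['lock_plugin'] = None      # release the lock: normal dispatch resumes
        chatbot.append([txt, "Plugin released."])
    else:
        # lock: the next user message is fed straight back into this function
        cookies['lock_plugin'] = 'crazy_functions.interactive_demo->interactive_demo'
        chatbot.append([txt, "Plugin locked; reply 'exit' to release."])
    yield from update_ui(chatbot=chatbot, history=history)
```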
@@ -67,8 +77,32 @@ def update_ui(chatbot, history, msg='正常', **kwargs): # refresh the UI
"""
Refresh the user interface.
"""
assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时可用clear将其清空然后用for+append循环重新赋值。"
yield chatbot.get_cookies(), chatbot, history, msg
assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时, 可用clear将其清空, 然后用for+append循环重新赋值。"
cookies = chatbot.get_cookies()
# Fix the UI display while a plugin is locked
if cookies.get('lock_plugin', None):
label = cookies.get('llm_model', "") + " | " + "正在锁定插件" + cookies.get('lock_plugin', None)
chatbot_gr = gradio.update(value=chatbot, label=label)
if cookies.get('label', "") != label: cookies['label'] = label # remember the current label
elif cookies.get('label', None):
chatbot_gr = gradio.update(value=chatbot, label=cookies.get('llm_model', ""))
cookies['label'] = None # clear the label
else:
chatbot_gr = chatbot
yield cookies, chatbot_gr, history, msg
def update_ui_lastest_msg(lastmsg, chatbot, history, delay=1): # refresh the UI
"""
Refresh the user interface.
"""
if len(chatbot) == 0: chatbot.append(["update_ui_last_msg", lastmsg])
chatbot[-1] = list(chatbot[-1])
chatbot[-1][-1] = lastmsg
yield from update_ui(chatbot=chatbot, history=history)
time.sleep(delay)
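
update_ui_lastest_msg wraps the common pattern of overwriting the last chatbot reply with a status line and refreshing the page, so a plugin can yield through it instead of mutating chatbot[-1] by hand. A small hedged sketch follows; the plugin name and messages are invented.

```python
# Hypothetical generator-style plugin using update_ui_lastest_msg for progress.
from toolbox import update_ui, update_ui_lastest_msg

def slow_task(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
    chatbot.append([txt, ""])
    yield from update_ui(chatbot=chatbot, history=history)
    for step in range(3):
        # overwrite the last reply in place and refresh, pausing `delay` seconds
        yield from update_ui_lastest_msg(f"Working... step {step + 1}/3", chatbot, history, delay=1)
    yield from update_ui_lastest_msg("Done.", chatbot, history, delay=0)
```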
def trimmed_format_exc():
import os, traceback
@@ -83,7 +117,7 @@ def CatchException(f):
"""
@wraps(f)
def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT=-1):
try:
yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
except Exception as e:
@@ -180,7 +214,7 @@ def write_results_to_file(history, file_name=None):
# remove everything that cannot be handled by utf8
f.write(content.encode('utf-8', 'ignore').decode())
f.write('\n\n')
res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
res = '以上材料已经被写入:\t' + os.path.abspath(f'./gpt_log/{file_name}')
print(res)
return res
@@ -210,16 +244,21 @@ def text_divide_paragraph(text):
"""
Split the text by paragraph separators and generate HTML code with paragraph tags.
"""
pre = '<div class="markdown-body">'
suf = '</div>'
if text.startswith(pre) and text.endswith(suf):
return text
if '```' in text:
# careful input
return text
return pre + text + suf
else:
# wtf input
lines = text.split("\n")
for i, line in enumerate(lines):
lines[i] = lines[i].replace(" ", "&nbsp;")
text = "</br>".join(lines)
return text
return pre + text + suf
@lru_cache(maxsize=128) # use an LRU cache to speed up the conversion
def markdown_convertion(txt):
@@ -331,8 +370,11 @@ def format_io(self, y):
if y is None or y == []:
return []
i_ask, gpt_reply = y[-1]
i_ask = text_divide_paragraph(i_ask) # the user input is free-form, preprocess it a bit
gpt_reply = close_up_code_segment_during_stream(gpt_reply) # when the code output is cut off halfway, try to append the closing ```
# the user input is free-form, preprocess it a bit
if i_ask is not None: i_ask = text_divide_paragraph(i_ask)
# when the code output is cut off halfway, try to append the closing ```
if gpt_reply is not None: gpt_reply = close_up_code_segment_during_stream(gpt_reply)
# process
y[-1] = (
None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']),
None if gpt_reply is None else markdown_convertion(gpt_reply)
@@ -380,7 +422,7 @@ def extract_archive(file_path, dest_dir):
print("Successfully extracted rar archive to {}".format(dest_dir))
except:
print("Rar format requires additional dependencies to install")
return '\n\n需要安装pip install rarfile来解压rar文件'
return '\n\n解压失败! 需要安装pip install rarfile来解压rar文件'
# Third-party library: requires pip install py7zr in advance
elif file_extension == '.7z':
@@ -391,7 +433,7 @@ def extract_archive(file_path, dest_dir):
print("Successfully extracted 7z archive to {}".format(dest_dir))
except:
print("7z format requires additional dependencies to install")
return '\n\n需要安装pip install py7zr来解压7z文件'
return '\n\n解压失败! 需要安装pip install py7zr来解压7z文件'
else:
return ''
return ''
@@ -420,6 +462,20 @@ def find_recent_files(directory):
return recent_files
def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
# Copy the file into the download area
import shutil
if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}'
new_path = os.path.join(f'./gpt_log/', rename_file)
# If it already exists, delete it first
if os.path.exists(new_path) and not os.path.samefile(new_path, file): os.remove(new_path)
# Copy the file over
if not os.path.exists(new_path): shutil.copyfile(file, new_path)
# Record the file in the chatbot cookies to avoid interference between users
if chatbot:
if 'file_to_promote' in chatbot._cookies: current = chatbot._cookies['file_to_promote']
else: current = []
chatbot._cookies.update({'file_to_promote': [new_path] + current})
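
promote_file_to_downloadzone copies a result file into ./gpt_log/ and records it under the 'file_to_promote' cookie, which the reworked on_report_generated below prefers over scanning gpt_log for recent files, keeping downloads scoped to the current session. A hedged sketch of the intended call from inside a plugin follows; the file name and messages are illustrative.

```python
# Hypothetical plugin snippet: register a generated file for this session's
# download area instead of relying on find_recent_files().
from toolbox import promote_file_to_downloadzone, update_ui

def export_report(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
    result_path = './gpt_log/example_report.md'   # assumed to have been written earlier
    promote_file_to_downloadzone(result_path, rename_file='report.md', chatbot=chatbot)
    chatbot.append([txt, 'Report generated; see the file download area.'])
    yield from update_ui(chatbot=chatbot, history=history)
```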
def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
"""
@@ -459,25 +515,39 @@ def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
return chatbot, txt, txt2
def on_report_generated(files, chatbot):
def on_report_generated(cookies, files, chatbot):
from toolbox import find_recent_files
if 'file_to_promote' in cookies:
report_files = cookies['file_to_promote']
cookies.pop('file_to_promote')
else:
report_files = find_recent_files('gpt_log')
if len(report_files) == 0:
return None, chatbot
return cookies, None, chatbot
# files.extend(report_files)
chatbot.append(['报告如何远程获取?', '报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。'])
return report_files, chatbot
file_links = ''
for f in report_files: file_links += f'<br/><a href="file={os.path.abspath(f)}" target="_blank">{f}</a>'
chatbot.append(['报告如何远程获取?', f'报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。{file_links}'])
return cookies, report_files, chatbot
def load_chat_cookies():
API_KEY, LLM_MODEL, AZURE_API_KEY = get_conf('API_KEY', 'LLM_MODEL', 'AZURE_API_KEY')
if is_any_api_key(AZURE_API_KEY):
if is_any_api_key(API_KEY): API_KEY = API_KEY + ',' + AZURE_API_KEY
else: API_KEY = AZURE_API_KEY
return {'api_key': API_KEY, 'llm_model': LLM_MODEL}
def is_openai_api_key(key):
API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$", key)
return bool(API_MATCH_ORIGINAL)
def is_azure_api_key(key):
API_MATCH_AZURE = re.match(r"[a-zA-Z0-9]{32}$", key)
return bool(API_MATCH_ORIGINAL) or bool(API_MATCH_AZURE)
return bool(API_MATCH_AZURE)
def is_api2d_key(key):
if key.startswith('fk') and len(key) == 41:
return True
else:
return False
API_MATCH_API2D = re.match(r"fk[a-zA-Z0-9]{6}-[a-zA-Z0-9]{32}$", key)
return bool(API_MATCH_API2D)
def is_any_api_key(key):
if ',' in key:
@@ -486,10 +556,10 @@ def is_any_api_key(key):
if is_any_api_key(k): return True
return False
else:
return is_openai_api_key(key) or is_api2d_key(key)
return is_openai_api_key(key) or is_api2d_key(key) or is_azure_api_key(key)
def what_keys(keys):
avail_key_list = {'OpenAI Key':0, "API2D Key":0}
avail_key_list = {'OpenAI Key':0, "Azure Key":0, "API2D Key":0}
key_list = keys.split(',')
for k in key_list:
@@ -500,7 +570,11 @@ def what_keys(keys):
if is_api2d_key(k):
avail_key_list['API2D Key'] += 1
return f"检测到: OpenAI Key {avail_key_list['OpenAI Key']}API2D Key {avail_key_list['API2D Key']}"
for k in key_list:
if is_azure_api_key(k):
avail_key_list['Azure Key'] += 1
return f"检测到: OpenAI Key {avail_key_list['OpenAI Key']} 个, Azure Key {avail_key_list['Azure Key']} 个, API2D Key {avail_key_list['API2D Key']}"
def select_api_key(keys, llm_model):
import random
@@ -515,8 +589,12 @@ def select_api_key(keys, llm_model):
for k in key_list:
if is_api2d_key(k): avail_key_list.append(k)
if llm_model.startswith('azure-'):
for k in key_list:
if is_azure_api_key(k): avail_key_list.append(k)
if len(avail_key_list) == 0:
raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源")
raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(右下角更换模型菜单中可切换openai,azure和api2d请求源)")
api_key = random.choice(avail_key_list) # random load balancing
return api_key
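
Taken together, the key helpers encode three shapes: OpenAI keys are sk- plus 48 alphanumerics, Azure keys are 32 bare alphanumerics, and API2D keys are fk + 6 alphanumerics + '-' + 32 alphanumerics; select_api_key then filters a comma-separated API_KEY string by the model-name prefix (the azure- and api2d- branches are visible above; routing gpt- models to OpenAI keys is assumed from the elided part of the function). A small sketch with obviously fake placeholder keys; the model names are only examples of those prefixes.

```python
# Sketch with fake placeholder keys that merely satisfy the regexes above.
from toolbox import is_any_api_key, what_keys, select_api_key

openai_key = "sk-" + "a" * 48                   # r"sk-[a-zA-Z0-9]{48}$"
azure_key  = "b" * 32                           # r"[a-zA-Z0-9]{32}$"
api2d_key  = "fk" + "c" * 6 + "-" + "d" * 32    # r"fk[a-zA-Z0-9]{6}-[a-zA-Z0-9]{32}$"

keys = ",".join([openai_key, azure_key, api2d_key])
assert is_any_api_key(keys)
print(what_keys(keys))                          # reports one key of each kind

print(select_api_key(keys, "gpt-3.5-turbo"))        # -> the sk- key (assumed gpt- branch)
print(select_api_key(keys, "azure-gpt-3.5"))        # -> the 32-character Azure key
print(select_api_key(keys, "api2d-gpt-3.5-turbo"))  # -> the fk... key
```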
@@ -728,6 +806,8 @@ def clip_history(inputs, history, tokenizer, max_token_limit):
Other small utilities:
- zip_folder: compress every file under a folder and move the archive to another specified path (written by GPT)
- gen_time_str: generate a timestamp string
- ProxyNetworkActivate: temporarily enable the proxy network (if one is configured)
- objdump/objload: quick debugging helpers
========================================================================
"""
@@ -762,11 +842,16 @@ def zip_folder(source_folder, dest_folder, zip_name):
print(f"Zip file created at {zip_file}")
def zip_result(folder):
import time
t = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
zip_folder(folder, './gpt_log/', f'{t}-result.zip')
return pj('./gpt_log/', f'{t}-result.zip')
def gen_time_str():
import time
return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
class ProxyNetworkActivate():
"""
Context manager that temporarily applies the configured proxy to a small block of code.
@@ -775,8 +860,9 @@ class ProxyNetworkActivate():
from toolbox import get_conf
proxies, = get_conf('proxies')
if 'no_proxy' in os.environ: os.environ.pop('no_proxy')
os.environ['HTTP_PROXY'] = proxies['http']
os.environ['HTTPS_PROXY'] = proxies['https']
if proxies is not None:
if 'http' in proxies: os.environ['HTTP_PROXY'] = proxies['http']
if 'https' in proxies: os.environ['HTTPS_PROXY'] = proxies['https']
return self
def __exit__(self, exc_type, exc_value, traceback):
@@ -784,3 +870,17 @@ class ProxyNetworkActivate():
if 'HTTP_PROXY' in os.environ: os.environ.pop('HTTP_PROXY')
if 'HTTPS_PROXY' in os.environ: os.environ.pop('HTTPS_PROXY')
return
def objdump(obj, file='objdump.tmp'):
import pickle
with open(file, 'wb+') as f:
pickle.dump(obj, f)
return
def objload(file='objdump.tmp'):
import pickle, os
if not os.path.exists(file):
return
with open(file, 'rb') as f:
return pickle.load(f)
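
ProxyNetworkActivate now tolerates a missing or partial proxies configuration, and objdump/objload are quick pickle-to-disk helpers for inspecting intermediate objects. A hedged usage sketch follows; the requests call and URL are illustrative, and objdump.tmp is simply the helpers' default file name.

```python
# Sketch: export the configured proxy for one network call, then round-trip
# a result dict through the objdump/objload debugging helpers.
import requests                                   # assumed available in this environment
from toolbox import ProxyNetworkActivate, objdump, objload

with ProxyNetworkActivate():                      # sets HTTP_PROXY/HTTPS_PROXY from config
    resp = requests.get("https://example.com", timeout=10)
# on exit the proxy variables are removed from os.environ again

state = {"status": resp.status_code, "length": len(resp.content)}
objdump(state)                                    # pickles to ./objdump.tmp by default
print(objload())                                  # -> {'status': ..., 'length': ...}
```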

version

@@ -1,5 +1,5 @@
{
"version": 3.37,
"version": 3.44,
"show_feature": true,
"new_feature": "修复gradio复制按钮BUG <-> 修复PDF翻译的BUG, 新增HTML中英双栏对照 <-> 添加了OpenAI图片生成插件 <-> 添加了OpenAI音频转文本总结插件 <-> 通过Slack添加对Claude的支持 <-> 提供复旦MOSS模型适配启用需额外依赖 <-> 提供docker-compose方案兼容LLAMA盘古RWKV等模型的后端 <-> 新增Live2D装饰 <-> 完善对话历史的保存/载入/删除 <-> 保存对话功能"
"new_feature": "[改善UI] 动态ChatBot窗口高度 <-> 修复Azure接口的BUG <-> 完善多语言模块 <-> 完善本地Latex矫错和翻译功能 <-> 增加gpt-3.5-16k的支持 <-> 新增最强Arxiv论文翻译插件 <-> 修复gradio复制按钮BUG <-> 修复PDF翻译的BUG, 新增HTML中英双栏对照 <-> 添加了OpenAI图片生成插件"
}