From c0ed2131f01b4d52a8d6f965af48d43de3a47b43 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 18:33:41 +0800 Subject: [PATCH 01/26] Update and rename bug_report.md to bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.md | 25 ------------- .github/ISSUE_TEMPLATE/bug_report.yml | 54 +++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 25 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index ac66876..0000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: '' -assignees: '' - ---- - -- **(1) Describe the bug 简述** - - -- **(2) Screen Shot 截图** - - -- **(3) Terminal Traceback 终端traceback(如有)** - - -- **(4) Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)** - - - -Before submitting an issue 提交issue之前: -- Please try to upgrade your code. 如果您的代码不是最新的,建议您先尝试更新代码 -- Please check project wiki for common problem solutions.项目[wiki](https://github.com/binary-husky/chatgpt_academic/wiki)有一些常见问题的解决方法 diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000..5aa8574 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,54 @@ +name: Report Bug | 报告BUG +description: "Report bug" +title: "[Bug]: " +labels: [] +body: + - type: dropdown + id: download + attributes: + label: Installation Method | 安装方法与平台 + options: + - Pip (我确认使用了最新的requirements.txt安装依赖) + - Anaconda (我确认使用了最新的requirements.txt安装依赖) + - Docker(Windows/Mac) + - Docker(Linux) + - Docker-Compose(Windows/Mac) + - Docker-Compose(Linux) + validations: + required: true + + - type: textarea + id: logs + attributes: + label: Describe the bug | 简述 + description: Describe the bug | 简述 + validations: + required: true + + - type: textarea + id: logs + attributes: + label: Screen Shot | 截图 + description: Screen Shot | 截图 + + - type: textarea + id: logs + attributes: + label: Terminal Traceback 终端traceback(如有) + description: Terminal Traceback 终端traceback(如有) + + - type: textarea + id: logs + attributes: + label: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有) + description: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有) + + + + + + + + + + From 111a65e9e8c6c0ebc1e8810ec879f4de2932f59d Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 18:34:55 +0800 Subject: [PATCH 02/26] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 5aa8574..0ed6332 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -18,7 +18,7 @@ body: required: true - type: textarea - id: logs + id: describe attributes: label: Describe the bug | 简述 description: Describe the bug | 简述 @@ -26,19 +26,19 @@ body: required: true - type: textarea - id: logs + id: screenshot attributes: label: Screen Shot | 截图 description: Screen Shot | 截图 - type: textarea - id: logs + id: traceback attributes: label: Terminal Traceback 终端traceback(如有) description: Terminal Traceback 终端traceback(如有) - type: textarea - id: logs + id: material attributes: label: Material to Help Reproduce 
Bugs 帮助我们复现的测试材料样本(如有) description: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有) From f8209e51f5abd2bdd0f2ace23a46ff65b36d7ab1 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 18:40:35 +0800 Subject: [PATCH 03/26] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 0ed6332..fb00290 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -20,32 +20,16 @@ body: - type: textarea id: describe attributes: - label: Describe the bug | 简述 - description: Describe the bug | 简述 + label: Describe the bug & Screen Shot | 简述 与 有帮助的截图 + description: Describe the bug & Screen Shot | 简述 与 有帮助的截图 validations: required: true - - - type: textarea - id: screenshot - attributes: - label: Screen Shot | 截图 - description: Screen Shot | 截图 - type: textarea id: traceback attributes: - label: Terminal Traceback 终端traceback(如有) - description: Terminal Traceback 终端traceback(如有) - - - type: textarea - id: material - attributes: - label: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有) - description: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有) - - - - + label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有) + description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有) From 2dd65af9f0d20d56f8105801ec40fc0477b85c6b Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 18:42:52 +0800 Subject: [PATCH 04/26] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index fb00290..7a3c51b 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -8,12 +8,14 @@ body: attributes: label: Installation Method | 安装方法与平台 options: - - Pip (我确认使用了最新的requirements.txt安装依赖) - - Anaconda (我确认使用了最新的requirements.txt安装依赖) + - Pip (please confirm: used latest requirements.txt) + - Anaconda (please confirm: used latest requirements.txt) - Docker(Windows/Mac) - Docker(Linux) - Docker-Compose(Windows/Mac) - Docker-Compose(Linux) + - Huggingface + - Others validations: required: true From 3f251e45713fa79f384a04e4dd3182702ad2b33e Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 18:45:23 +0800 Subject: [PATCH 05/26] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 7a3c51b..4e7a1fc 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -8,14 +8,14 @@ body: attributes: label: Installation Method | 安装方法与平台 options: - - Pip (please confirm: used latest requirements.txt) - - Anaconda (please confirm: used latest requirements.txt) + - Pip Install (I used latest requirements.txt and python>=3.8) + - Anaconda (I used latest requirements.txt and python>=3.8) - Docker(Windows/Mac) - Docker(Linux) - Docker-Compose(Windows/Mac) - Docker-Compose(Linux) - Huggingface - - Others + - Others (Please Describe) validations: required: true From 
777850200deb1933fdc97f16a693f786a973ca22 Mon Sep 17 00:00:00 2001 From: fuqingxu <505030475@qq.com> Date: Mon, 8 May 2023 19:21:17 +0800 Subject: [PATCH 06/26] update the error handling of moss and chatglm --- check_proxy.py | 14 +++++++++++--- config.py | 2 +- docs/waifu_plugin/autoload.js | 7 +++++++ request_llm/bridge_chatglm.py | 6 +++--- request_llm/bridge_moss.py | 14 ++++++++------ 5 files changed, 30 insertions(+), 13 deletions(-) diff --git a/check_proxy.py b/check_proxy.py index 754b5d3..977802d 100644 --- a/check_proxy.py +++ b/check_proxy.py @@ -94,7 +94,7 @@ def get_current_version(): return current_version -def auto_update(): +def auto_update(raise_error=False): """ 一键更新协议:查询版本和用户意见 """ @@ -126,14 +126,22 @@ def auto_update(): try: patch_and_restart(path) except: - print('更新失败。') + msg = '更新失败。' + if raise_error: + from toolbox import trimmed_format_exc + msg += trimmed_format_exc() + print(msg) else: print('自动更新程序:已禁用') return else: return except: - print('自动更新程序:已禁用') + msg = '自动更新程序:已禁用' + if raise_error: + from toolbox import trimmed_format_exc + msg += trimmed_format_exc() + print(msg) def warm_up_modules(): print('正在执行一些模块的预热...') diff --git a/config.py b/config.py index c95e230..2617aff 100644 --- a/config.py +++ b/config.py @@ -46,7 +46,7 @@ MAX_RETRY = 2 # OpenAI模型选择是(gpt4现在只对申请成功的人开放,体验gpt-4可以试试api2d) LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing"] # 本地LLM模型如ChatGLM的执行方式 CPU/GPU LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda" diff --git a/docs/waifu_plugin/autoload.js b/docs/waifu_plugin/autoload.js index 6922fff..3464a5c 100644 --- a/docs/waifu_plugin/autoload.js +++ b/docs/waifu_plugin/autoload.js @@ -16,6 +16,13 @@ try { live2d_settings['canTakeScreenshot'] = false; live2d_settings['canTurnToHomePage'] = false; live2d_settings['canTurnToAboutPage'] = false; + live2d_settings['showHitokoto'] = false; // 显示一言 + live2d_settings['showF12Status'] = false; // 显示加载状态 + live2d_settings['showF12Message'] = false; // 显示看板娘消息 + live2d_settings['showF12OpenMsg'] = false; // 显示控制台打开提示 + live2d_settings['showCopyMessage'] = false; // 显示 复制内容 提示 + live2d_settings['showWelcomeMessage'] = true; // 显示进入面页欢迎词 + /* 在 initModel 前添加 */ initModel("file=docs/waifu_plugin/waifu-tips.json"); }}); diff --git a/request_llm/bridge_chatglm.py b/request_llm/bridge_chatglm.py index 7c86a22..3300286 100644 --- a/request_llm/bridge_chatglm.py +++ b/request_llm/bridge_chatglm.py @@ -87,7 +87,7 @@ class GetGLMHandle(Process): global glm_handle glm_handle = None ################################################################################# -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): +def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 函数的说明请见 request_llm/bridge_all.py @@ -95,7 +95,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", global glm_handle if glm_handle is None: glm_handle = GetGLMHandle() - observe_window[0] = load_message + "\n\n" + glm_handle.info + if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glm_handle.info if not glm_handle.success: error = glm_handle.info glm_handle = None @@ -110,7 +110,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], 
sys_prompt="", watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 response = "" for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - observe_window[0] = response + if len(observe_window) >= 1: observe_window[0] = response if len(observe_window) >= 2: if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。") diff --git a/request_llm/bridge_moss.py b/request_llm/bridge_moss.py index 06aafb5..a8be91b 100644 --- a/request_llm/bridge_moss.py +++ b/request_llm/bridge_moss.py @@ -153,7 +153,8 @@ class GetGLMHandle(Process): print(response.lstrip('\n')) self.child.send(response.lstrip('\n')) except: - self.child.send('[Local Message] Call MOSS fail.') + from toolbox import trimmed_format_exc + self.child.send('[Local Message] Call MOSS fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n') # 请求处理结束,开始下一个循环 self.child.send('[Finish]') @@ -217,6 +218,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp if not moss_handle.success: moss_handle = None return + else: + response = "[Local Message]: 等待MOSS响应中 ..." + chatbot[-1] = (inputs, response) + yield from update_ui(chatbot=chatbot, history=history) if additional_fn is not None: import core_functional @@ -231,15 +236,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收chatglm的回复 - response = "[Local Message]: 等待MOSS响应中 ..." - chatbot[-1] = (inputs, response) - yield from update_ui(chatbot=chatbot, history=history) for response in moss_handle.stream_chat(query=inputs, history=history_feedin, sys_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - chatbot[-1] = (inputs, response) + chatbot[-1] = (inputs, response.strip('<|MOSS|>: ')) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 if response == "[Local Message]: 等待MOSS响应中 ...": response = "[Local Message]: MOSS响应异常 ..." 
- history.extend([inputs, response]) + history.extend([inputs, response.strip('<|MOSS|>: ')]) yield from update_ui(chatbot=chatbot, history=history) From 84fc8647f7254e5866e562f7e2dfc0cec2067391 Mon Sep 17 00:00:00 2001 From: fuqingxu <505030475@qq.com> Date: Mon, 8 May 2023 20:06:41 +0800 Subject: [PATCH 07/26] =?UTF-8?q?=E4=BF=AE=E6=AD=A3moss=E5=92=8Cchatglm?= =?UTF-8?q?=E7=9A=84=E7=8E=AF=E5=A2=83=E4=BE=9D=E8=B5=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/GithubAction+ChatGLM+Moss | 9 ++------- request_llm/bridge_chatglm.py | 3 ++- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/docs/GithubAction+ChatGLM+Moss b/docs/GithubAction+ChatGLM+Moss index 85888e2..ece19d6 100644 --- a/docs/GithubAction+ChatGLM+Moss +++ b/docs/GithubAction+ChatGLM+Moss @@ -3,7 +3,7 @@ FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04 ARG useProxyNetwork='' RUN apt-get update -RUN apt-get install -y curl proxychains curl +RUN apt-get install -y curl proxychains curl gcc RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing @@ -21,12 +21,7 @@ RUN python3 -m pip install -r request_llm/requirements_moss.txt RUN python3 -m pip install -r request_llm/requirements_chatglm.txt RUN python3 -m pip install -r request_llm/requirements_newbing.txt -# # 预热CHATGLM参数(非必要 可选步骤) -# RUN echo ' \n\ -# from transformers import AutoModel, AutoTokenizer \n\ -# chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) \n\ -# chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() ' >> warm_up_chatglm.py -# RUN python3 -u warm_up_chatglm.py + # 预热Tiktoken模块 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' diff --git a/request_llm/bridge_chatglm.py b/request_llm/bridge_chatglm.py index 3300286..100783d 100644 --- a/request_llm/bridge_chatglm.py +++ b/request_llm/bridge_chatglm.py @@ -68,7 +68,8 @@ class GetGLMHandle(Process): # command = self.child.recv() # if command == '[Terminate]': break except: - self.child.send('[Local Message] Call ChatGLM fail.') + from toolbox import trimmed_format_exc + self.child.send('[Local Message] Call ChatGLM fail.' 
+ '\n```\n' + trimmed_format_exc() + '\n```\n') # 请求处理结束,开始下一个循环 self.child.send('[Finish]') From 624d203bbc90204ca8775c03774253ff80658fcf Mon Sep 17 00:00:00 2001 From: fuqingxu <505030475@qq.com> Date: Mon, 8 May 2023 20:09:54 +0800 Subject: [PATCH 08/26] update docker compose --- docker-compose.yml | 41 ++++++++++++----------------------------- 1 file changed, 12 insertions(+), 29 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 2aa666d..90d5cb5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,34 +1,30 @@ -【请修改完参数后,删除此行】请在以下方案中选择一种,然后删除其他的方案,最后docker-compose up运行 | Please choose from one of these options below, delete other options as well as This Line +#【请修改完参数后,删除此行】请在以下方案中选择一种,然后删除其他的方案,最后docker-compose up运行 | Please choose from one of these options below, delete other options as well as This Line ## =================================================== -## 【方案一】 如果不需要运行本地模型(仅chatgpt类远程服务) +## 【方案一】 如果不需要运行本地模型(仅chatgpt,newbing类远程服务) ## =================================================== version: '3' services: gpt_academic_nolocalllms: - image: fuqingxu/gpt_academic:no-local-llms + image: ghcr.io/binary-husky/gpt_academic_nolocal:master environment: # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' + API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' USE_PROXY: ' True ' proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-4"] ' - DEFAULT_WORKER_NUM: ' 10 ' + AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "newbing"] ' WEB_PORT: ' 22303 ' ADD_WAIFU: ' True ' - AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] ' + # DEFAULT_WORKER_NUM: ' 10 ' + # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] ' # 与宿主的网络融合 network_mode: "host" # 不使用代理网络拉取最新代码 command: > - bash -c " echo '[gpt-academic] 正在从github拉取最新代码...' && - git checkout master --force && - git remote set-url origin https://github.com/binary-husky/chatgpt_academic.git && - git pull && - python3 -u main.py" + bash -c "python3 -u main.py" ### =================================================== @@ -37,19 +33,19 @@ services: version: '3' services: gpt_academic_with_chatglm: - image: fuqingxu/gpt_academic:chatgpt-chatglm-newbing # [option 2] 如果需要运行ChatGLM本地模型 + image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master environment: # 请查阅 `config.py` 以查看所有的配置信息 API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' USE_PROXY: ' True ' proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-4", "chatglm"] ' + AVAIL_LLM_MODELS: ' ["chatglm", "moss", "gpt-3.5-turbo", "gpt-4", "newbing"] ' LOCAL_MODEL_DEVICE: ' cuda ' DEFAULT_WORKER_NUM: ' 10 ' WEB_PORT: ' 12303 ' ADD_WAIFU: ' True ' - AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] ' + # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] ' # 显卡的使用,nvidia0指第0个GPU runtime: nvidia @@ -58,21 +54,8 @@ services: # 与宿主的网络融合 network_mode: "host" - - # 使用代理网络拉取最新代码 - # command: > - # bash -c " echo '[gpt-academic] 正在从github拉取最新代码...' 
&& - # truncate -s -1 /etc/proxychains.conf && - # echo \"socks5 127.0.0.1 10880\" >> /etc/proxychains.conf && - # proxychains git pull && - # python3 -u main.py " - - # 不使用代理网络拉取最新代码 command: > - bash -c " echo '[gpt-academic] 正在从github拉取最新代码...' && - git pull && - python3 -u main.py" - + bash -c "python3 -u main.py" ### =================================================== ### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型 From 88ac4cf0a7c481e7a3adecc18b818d07bcc9ecec Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 20:12:38 +0800 Subject: [PATCH 09/26] Update README.md --- README.md | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index addf043..9bd995b 100644 --- a/README.md +++ b/README.md @@ -157,14 +157,9 @@ docker run --rm -it -p 50923:50923 gpt-academic 2. ChatGPT+ChatGLM(需要对Docker熟悉 + 读懂Dockerfile + 电脑配置够强) ``` sh -# 修改Dockerfile -cd docs && nano Dockerfile+ChatGLM -# 构建 (Dockerfile+ChatGLM在docs路径下,请先cd docs) -docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM . -# 运行 (1) 直接运行: -docker run --rm -it --net=host --gpus=all gpt-academic -# 运行 (2) 我想运行之前进容器做一些调整: -docker run --rm -it --net=host --gpus=all gpt-academic bash +1. 修改docker-compose.yml,删除方案二和方案三,保留方案二 +2. 修改docker-compose.yml中方案二的配置,参考其中注释即可 +3. 终端运行 docker-compose up ``` 3. ChatGPT + LLAMA + 盘古 + RWKV(需要精通Docker) From 8f9c5c50394ba61b8b151c879de24a75a601560f Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 20:13:32 +0800 Subject: [PATCH 10/26] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9bd995b..d1efa4b 100644 --- a/README.md +++ b/README.md @@ -154,7 +154,7 @@ docker run --rm -it --net=host gpt-academic docker run --rm -it -p 50923:50923 gpt-academic ``` -2. ChatGPT+ChatGLM(需要对Docker熟悉 + 读懂Dockerfile + 电脑配置够强) +2. ChatGPT+ChatGLM+MOSS(需要熟悉Docker) ``` sh 1. 修改docker-compose.yml,删除方案二和方案三,保留方案二 @@ -162,7 +162,7 @@ docker run --rm -it -p 50923:50923 gpt-academic 3. 终端运行 docker-compose up ``` -3. ChatGPT + LLAMA + 盘古 + RWKV(需要精通Docker) +3. ChatGPT + LLAMA + 盘古 + RWKV(需要熟悉Docker) ``` sh 1. 修改docker-compose.yml,删除方案一和方案二,保留方案三(基于jittor) 2. 
修改docker-compose.yml中方案三的配置,参考其中注释即可 From 1bb45d4998be7f14d060631a49afcb744a578ac1 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 20:16:43 +0800 Subject: [PATCH 11/26] Update docker-compose.yml --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 90d5cb5..9465a62 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -70,7 +70,7 @@ services: USE_PROXY: ' True ' proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-4", "jittorllms_rwkv"] ' + AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "newbing", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] ' LOCAL_MODEL_DEVICE: ' cuda ' DEFAULT_WORKER_NUM: ' 10 ' WEB_PORT: ' 12305 ' From 98269e87082f4df6833102ae39dd00b76239f25f Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 20:21:28 +0800 Subject: [PATCH 12/26] Update README.md --- README.md | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index d1efa4b..eca93a9 100644 --- a/README.md +++ b/README.md @@ -99,23 +99,20 @@ cd chatgpt_academic 3. 安装依赖 ```sh -# (选择I: 如熟悉python)(python版本3.9以上,越新越好) +# (选择I: 如熟悉python)(python版本3.9以上,越新越好),备注:使用官方pip源或者阿里pip源,临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ python -m pip install -r requirements.txt -# 备注:使用官方pip源或者阿里pip源,其他pip源(如一些大学的pip)有可能出问题,临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -# (选择II: 如不熟悉python)使用anaconda,步骤也是类似的: -# (II-1)conda create -n gptac_venv python=3.11 -# (II-2)conda activate gptac_venv -# (II-3)python -m pip install -r requirements.txt +# (选择II: 如不熟悉python)使用anaconda,步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr): +conda create -n gptac_venv python=3.11 +conda activate gptac_venv +python -m pip install -r requirements.txt ``` 【非必要可选步骤】如果需要支持清华ChatGLM/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强): ```sh # 【非必要可选步骤I】支持清华ChatGLM python -m pip install -r request_llm/requirements_chatglm.txt -## 清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: -## 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda -## 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +## 清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) # 【非必要可选步骤II】支持复旦MOSS python -m pip install -r request_llm/requirements_moss.txt From 397dc2d0dc2530fd1f00eb8c114aaae435a7fae0 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 20:22:43 +0800 Subject: [PATCH 13/26] Update README.md --- README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index eca93a9..aa8fe36 100644 --- a/README.md +++ b/README.md @@ -110,16 +110,15 @@ python -m pip install -r requirements.txt 【非必要可选步骤】如果需要支持清华ChatGLM/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python 
+ 用过Pytorch + 电脑配置够强): ```sh -# 【非必要可选步骤I】支持清华ChatGLM +# 【非必要可选步骤I】支持清华ChatGLM。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) python -m pip install -r request_llm/requirements_chatglm.txt -## 清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) # 【非必要可选步骤II】支持复旦MOSS python -m pip install -r request_llm/requirements_moss.txt git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # 注意执行此行代码时,必须处于项目根路径 # 【非必要可选步骤III】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] ``` 4. 运行 From 00e7fbd7fab6a0a02634712ec2fd49f5431b87a3 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 20:27:18 +0800 Subject: [PATCH 14/26] Update README.md --- README.md | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index aa8fe36..1558c34 100644 --- a/README.md +++ b/README.md @@ -103,24 +103,32 @@ cd chatgpt_academic python -m pip install -r requirements.txt # (选择II: 如不熟悉python)使用anaconda,步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 -conda activate gptac_venv -python -m pip install -r requirements.txt +conda create -n gptac_venv python=3.11 # 创建anaconda环境 +conda activate gptac_venv # 激活anaconda环境 +python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤 ``` -【非必要可选步骤】如果需要支持清华ChatGLM/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强): +
+<details><summary>如果需要支持清华ChatGLM/复旦MOSS作为后端,请点击展开此处</summary>
+<p>
+ +【可选步骤】如果需要支持清华ChatGLM/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强): ```sh -# 【非必要可选步骤I】支持清华ChatGLM。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +# 【可选步骤I】支持清华ChatGLM。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) python -m pip install -r request_llm/requirements_chatglm.txt -# 【非必要可选步骤II】支持复旦MOSS +# 【可选步骤II】支持复旦MOSS python -m pip install -r request_llm/requirements_moss.txt git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # 注意执行此行代码时,必须处于项目根路径 -# 【非必要可选步骤III】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案): +# 【可选步骤III】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] ``` +
+</p>
+</details>
+ + + 4. 运行 ```sh python main.py From 2fa52f71e754c204fc25a5856518b3373f95f96f Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 20:31:35 +0800 Subject: [PATCH 15/26] Update README.md --- README.md | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 1558c34..758b329 100644 --- a/README.md +++ b/README.md @@ -145,17 +145,13 @@ python main.py 1. 仅ChatGPT(推荐大多数人选择) ``` sh -# 下载项目 -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -# 配置 “Proxy”, “API_KEY” 以及 “WEB_PORT” (例如50923) 等 -用任意文本编辑器编辑 config.py -# 安装 -docker build -t gpt-academic . -#(最后一步-选择1)在Linux环境下,用`--net=host`更方便快捷 -docker run --rm -it --net=host gpt-academic -#(最后一步-选择2)在macOS/windows环境下,只能用-p选项将容器上的端口(例如50923)暴露给主机上的端口 -docker run --rm -it -p 50923:50923 gpt-academic +git clone https://github.com/binary-husky/chatgpt_academic.git # 下载项目 +cd chatgpt_academic # 进入路径 +nano config.py # 用任意文本编辑器编辑config.py, 配置 “Proxy”, “API_KEY” 以及 “WEB_PORT” (例如50923) 等 +docker build -t gpt-academic . # 安装 + +docker run --rm -it --net=host gpt-academic #(最后一步-选择1)在Linux环境下,用`--net=host`更方便快捷 +docker run --rm -it -p 50923:50923 gpt-academic #(最后一步-选择2)在macOS/windows环境下,只能用-p选项将容器上的端口(例如50923)暴露给主机上的端口 ``` 2. ChatGPT+ChatGLM+MOSS(需要熟悉Docker) From 24a832608c906b3b2b8c7797326c84e7285e1334 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 20:32:18 +0800 Subject: [PATCH 16/26] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 758b329..0858766 100644 --- a/README.md +++ b/README.md @@ -157,14 +157,14 @@ docker run --rm -it -p 50923:50923 gpt-academic #(最后一步-选择2) 2. ChatGPT+ChatGLM+MOSS(需要熟悉Docker) ``` sh -1. 修改docker-compose.yml,删除方案二和方案三,保留方案二 +1. 修改docker-compose.yml,删除方案1和方案3,保留方案2 2. 修改docker-compose.yml中方案二的配置,参考其中注释即可 3. 终端运行 docker-compose up ``` 3. ChatGPT + LLAMA + 盘古 + RWKV(需要熟悉Docker) ``` sh -1. 修改docker-compose.yml,删除方案一和方案二,保留方案三(基于jittor) +1. 修改docker-compose.yml,删除方案1和方案2,保留方案3(基于jittor) 2. 修改docker-compose.yml中方案三的配置,参考其中注释即可 3. 终端运行 docker-compose up ``` From f54872007fde59d81d4e3d8e577aaea70b1d0d4c Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 20:33:32 +0800 Subject: [PATCH 17/26] Update README.md --- README.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 0858766..e1482e4 100644 --- a/README.md +++ b/README.md @@ -157,16 +157,14 @@ docker run --rm -it -p 50923:50923 gpt-academic #(最后一步-选择2) 2. ChatGPT+ChatGLM+MOSS(需要熟悉Docker) ``` sh -1. 修改docker-compose.yml,删除方案1和方案3,保留方案2 -2. 修改docker-compose.yml中方案二的配置,参考其中注释即可 -3. 终端运行 docker-compose up +# 修改docker-compose.yml,删除方案1和方案3,保留方案2。修改docker-compose.yml中方案2的配置,参考其中注释即可 +docker-compose up ``` 3. ChatGPT + LLAMA + 盘古 + RWKV(需要熟悉Docker) ``` sh -1. 修改docker-compose.yml,删除方案1和方案2,保留方案3(基于jittor) -2. 修改docker-compose.yml中方案三的配置,参考其中注释即可 -3. 
终端运行 docker-compose up +# 修改docker-compose.yml,删除方案1和方案2,保留方案3。修改docker-compose.yml中方案3的配置,参考其中注释即可 +docker-compose up ``` From 1134ec2df53a7a573ad4ffc45f5975dab0b7bad2 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 8 May 2023 20:33:47 +0800 Subject: [PATCH 18/26] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e1482e4..c632097 100644 --- a/README.md +++ b/README.md @@ -154,7 +154,7 @@ docker run --rm -it --net=host gpt-academic #(最后一步-选择1) docker run --rm -it -p 50923:50923 gpt-academic #(最后一步-选择2)在macOS/windows环境下,只能用-p选项将容器上的端口(例如50923)暴露给主机上的端口 ``` -2. ChatGPT+ChatGLM+MOSS(需要熟悉Docker) +2. ChatGPT + ChatGLM + MOSS(需要熟悉Docker) ``` sh # 修改docker-compose.yml,删除方案1和方案3,保留方案2。修改docker-compose.yml中方案2的配置,参考其中注释即可 From 18a59598ea77fada86420273331993a01ae84f21 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Thu, 11 May 2023 18:11:19 +0800 Subject: [PATCH 19/26] Update README.md --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c632097..a331f34 100644 --- a/README.md +++ b/README.md @@ -150,8 +150,10 @@ cd chatgpt_academic # 进入路径 nano config.py # 用任意文本编辑器编辑config.py, 配置 “Proxy”, “API_KEY” 以及 “WEB_PORT” (例如50923) 等 docker build -t gpt-academic . # 安装 -docker run --rm -it --net=host gpt-academic #(最后一步-选择1)在Linux环境下,用`--net=host`更方便快捷 -docker run --rm -it -p 50923:50923 gpt-academic #(最后一步-选择2)在macOS/windows环境下,只能用-p选项将容器上的端口(例如50923)暴露给主机上的端口 +#(最后一步-选择1)在Linux环境下,用`--net=host`更方便快捷 +docker run --rm -it --net=host gpt-academic +#(最后一步-选择2)在macOS/windows环境下,只能用-p选项将容器上的端口(例如50923)暴露给主机上的端口 +docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic ``` 2. 
ChatGPT + ChatGLM + MOSS(需要熟悉Docker) From dadbb711477a21810ff40eb9864ecf40d12365d0 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Thu, 11 May 2023 18:42:51 +0800 Subject: [PATCH 20/26] Update bridge_chatgpt.py --- request_llm/bridge_chatgpt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py index 48eaba0..aa6ae72 100644 --- a/request_llm/bridge_chatgpt.py +++ b/request_llm/bridge_chatgpt.py @@ -216,7 +216,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp else: from toolbox import regular_txt_to_markdown tb_str = '```\n' + trimmed_format_exc() + '```' - chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded[4:])}") + chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}") yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面 return From fdb9650ccacade0774ffa06dc1dc62488aeef175 Mon Sep 17 00:00:00 2001 From: 505030475 <505030475@qq.com> Date: Fri, 12 May 2023 23:05:16 +0800 Subject: [PATCH 21/26] word file format reminder --- crazy_functions/总结word文档.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crazy_functions/总结word文档.py b/crazy_functions/总结word文档.py index f1fe201..eada69d 100644 --- a/crazy_functions/总结word文档.py +++ b/crazy_functions/总结word文档.py @@ -85,7 +85,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr # 基本信息:功能、贡献者 chatbot.append([ "函数插件功能?", - "批量总结Word文档。函数插件贡献者: JasonGuo1"]) + "批量总结Word文档。函数插件贡献者: JasonGuo1。注意, 如果是.doc文件, 请先转化为.docx格式。"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 尝试导入依赖,如果缺少依赖,则给出安装建议 From 08e184ea559ca75c5cd98fab579328fca9c4170c Mon Sep 17 00:00:00 2001 From: 505030475 <505030475@qq.com> Date: Sat, 13 May 2023 00:28:29 +0800 Subject: [PATCH 22/26] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E5=9B=BE=E7=89=87?= =?UTF-8?q?=E7=94=9F=E6=88=90=E6=8E=A5=E5=8F=A3=E6=8F=92=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 10 ++++++ crazy_functions/图片生成.py | 64 +++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) create mode 100644 crazy_functions/图片生成.py diff --git a/crazy_functional.py b/crazy_functional.py index 23cbd30..3e7b12f 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -236,5 +236,15 @@ def get_crazy_functions(): "Function": HotReload(同时问询_指定模型) }, }) + from crazy_functions.图片生成 import 图片生成 + function_plugins.update({ + "图片生成(先切换模型到openai或api2d)": { + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + "ArgsReminder": "在这里输入分辨率, 如256x256(默认)", # 高级参数输入区的显示提示 + "Function": HotReload(图片生成) + }, + }) ###################### 第n组插件 ########################### return function_plugins diff --git a/crazy_functions/图片生成.py b/crazy_functions/图片生成.py new file mode 100644 index 0000000..ae832c5 --- /dev/null +++ b/crazy_functions/图片生成.py @@ -0,0 +1,64 @@ +from toolbox import CatchException, update_ui, get_conf, select_api_key +from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive +import datetime + + +def gen_image(llm_kwargs, prompt, resolution="256x256"): + import requests, json, time, os + from request_llm.bridge_all import model_info + + proxies, = get_conf('proxies') + # Set up OpenAI API key and model + api_key = 
select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) + chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] + # 'https://api.openai.com/v1/chat/completions' + img_endpoint = chat_endpoint.replace('chat/completions','images/generations') + # # Generate the image + url = img_endpoint + headers = { + 'Authorization': f"Bearer {api_key}", + 'Content-Type': 'application/json' + } + data = { + 'prompt': prompt, + 'n': 1, + 'size': '256x256', + 'response_format': 'url' + } + response = requests.post(url, headers=headers, json=data, proxies=proxies) + print(response.content) + image_url = json.loads(response.content.decode('utf8'))['data'][0]['url'] + + # 文件保存到本地 + r = requests.get(image_url, proxies=proxies) + file_path = 'gpt_log/image_gen/' + os.makedirs(file_path, exist_ok=True) + file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png' + with open(file_path+file_name, 'wb+') as f: f.write(r.content) + + + return image_url, file_path+file_name + + + +@CatchException +def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + """ + txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 + llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 + plugin_kwargs 插件模型的参数,暂时没有用武之地 + chatbot 聊天显示框的句柄,用于显示给用户 + history 聊天历史,前情提要 + system_prompt 给gpt的静默提醒 + web_port 当前软件运行的端口号 + """ + history = [] # 清空历史,以免输入溢出 + chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-xxxx或者api2d-xxxx。如果中文效果不理想, 尝试Prompt。正在处理中 .....")) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 + resolution = plugin_kwargs.get("advanced_arg", '256x256') + image_url, image_path = gen_image(llm_kwargs, prompt, resolution) + chatbot.append([prompt, + f'`{image_url}`\n\n'+ + f'
<br/><div align="center"><img src="file={image_path}"></div>
' + ]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 From 986653b43e7319627e21489e0dd01c13dd51227f Mon Sep 17 00:00:00 2001 From: 505030475 <505030475@qq.com> Date: Sat, 13 May 2023 14:00:07 +0800 Subject: [PATCH 23/26] resolution --- crazy_functions/图片生成.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crazy_functions/图片生成.py b/crazy_functions/图片生成.py index ae832c5..d9e2787 100644 --- a/crazy_functions/图片生成.py +++ b/crazy_functions/图片生成.py @@ -22,7 +22,7 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"): data = { 'prompt': prompt, 'n': 1, - 'size': '256x256', + 'size': resolution, 'response_format': 'url' } response = requests.post(url, headers=headers, json=data, proxies=proxies) From e4de1549a3638d25b9d666d8889c5b26e23dc3f3 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Sat, 13 May 2023 14:07:42 +0800 Subject: [PATCH 24/26] Update README.md --- README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a331f34..b2cddba 100644 --- a/README.md +++ b/README.md @@ -44,7 +44,7 @@ chat分析报告生成 | [函数插件] 运行后自动生成总结汇报 启动暗色gradio[主题](https://github.com/binary-husky/chatgpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题 [多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持,[API2D](https://api2d.com/)接口支持 | 同时被GPT3.5、GPT4、[清华ChatGLM](https://github.com/THUDM/ChatGLM-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧? 更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama),[RWKV](https://github.com/BlinkDL/ChatRWKV)和[盘古α](https://openi.org.cn/pangu/) -…… | …… +更多新功能展示(图像生成等) …… | 见本文档结尾处 …… @@ -262,6 +262,11 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h +8. OpenAI图像生成 +
<div align="center">
+<img src="…">
+</div>
+ ## 版本: - version 3.5(Todo): 使用自然语言调用本项目的所有函数插件(高优先级) From d52c0c4783f3af54d0125e2859d36f2c8f795829 Mon Sep 17 00:00:00 2001 From: 505030475 <505030475@qq.com> Date: Sat, 13 May 2023 14:20:34 +0800 Subject: [PATCH 25/26] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E8=BE=93=E5=87=BA?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/图片生成.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crazy_functions/图片生成.py b/crazy_functions/图片生成.py index d9e2787..ecb75cd 100644 --- a/crazy_functions/图片生成.py +++ b/crazy_functions/图片生成.py @@ -58,7 +58,9 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro resolution = plugin_kwargs.get("advanced_arg", '256x256') image_url, image_path = gen_image(llm_kwargs, prompt, resolution) chatbot.append([prompt, - f'`{image_url}`\n\n'+ - f'
' + f'图像中转网址:
`{image_url}`
'+ + f'中转网址预览:
' + f'本地文件地址:
`{image_path}`
'+ + f'本地文件预览:
' ]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 From c0e57e0e396e04cde40658e0f6ba2bf2fced0bd8 Mon Sep 17 00:00:00 2001 From: binary-husky <505030475@qq.com> Date: Sun, 14 May 2023 15:18:33 +0800 Subject: [PATCH 26/26] fix bool env read bug --- toolbox.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/toolbox.py b/toolbox.py index bdd99c9..6f5469e 100644 --- a/toolbox.py +++ b/toolbox.py @@ -545,7 +545,10 @@ def read_env_variable(arg, default_value): print(f"[ENV_VAR] 尝试加载{arg},默认值:{default_value} --> 修正值:{env_arg}") try: if isinstance(default_value, bool): - r = bool(env_arg) + env_arg = env_arg.strip() + if env_arg == 'True': r = True + elif env_arg == 'False': r = False + else: print('enter True or False, but have:', env_arg); r = default_value elif isinstance(default_value, int): r = int(env_arg) elif isinstance(default_value, float):
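
The fix above addresses a classic pitfall: `os.environ` values are always strings, and `bool(env_arg)` is truthy for any non-empty string, so an environment override of `USE_PROXY=False` was previously read back as `True`. A minimal standalone sketch of the corrected boolean-parsing pattern (the function name `read_bool_env` and the fall-back-to-default behavior are illustrative assumptions, not the project's exact code):

```python
import os

def read_bool_env(name: str, default_value: bool) -> bool:
    # os.environ values are strings; bool("False") == True,
    # so the string must be compared explicitly, not cast.
    env_arg = os.environ.get(name)
    if env_arg is None:
        return default_value
    env_arg = env_arg.strip()
    if env_arg == 'True':
        return True
    elif env_arg == 'False':
        return False
    else:
        # mirror the patch: warn and keep the default on bad input
        print('enter True or False, but have:', env_arg)
        return default_value

# e.g. USE_PROXY=False in the environment now correctly yields False
use_proxy = read_bool_env('USE_PROXY', default_value=True)
```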