From c0ed2131f01b4d52a8d6f965af48d43de3a47b43 Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 18:33:41 +0800
Subject: [PATCH 01/26] Update and rename bug_report.md to bug_report.yml
---
.github/ISSUE_TEMPLATE/bug_report.md | 25 -------------
.github/ISSUE_TEMPLATE/bug_report.yml | 54 +++++++++++++++++++++++++++
2 files changed, 54 insertions(+), 25 deletions(-)
delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md
create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index ac66876..0000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: ''
-assignees: ''
-
----
-
-- **(1) Describe the bug 简述**
-
-
-- **(2) Screen Shot 截图**
-
-
-- **(3) Terminal Traceback 终端traceback(如有)**
-
-
-- **(4) Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)**
-
-
-
-Before submitting an issue 提交issue之前:
-- Please try to upgrade your code. 如果您的代码不是最新的,建议您先尝试更新代码
-- Please check project wiki for common problem solutions.项目[wiki](https://github.com/binary-husky/chatgpt_academic/wiki)有一些常见问题的解决方法
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 0000000..5aa8574
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,54 @@
+name: Report Bug | 报告BUG
+description: "Report bug"
+title: "[Bug]: "
+labels: []
+body:
+ - type: dropdown
+ id: download
+ attributes:
+ label: Installation Method | 安装方法与平台
+ options:
+ - Pip (我确认使用了最新的requirements.txt安装依赖)
+ - Anaconda (我确认使用了最新的requirements.txt安装依赖)
+ - Docker(Windows/Mac)
+ - Docker(Linux)
+ - Docker-Compose(Windows/Mac)
+ - Docker-Compose(Linux)
+ validations:
+ required: true
+
+ - type: textarea
+ id: logs
+ attributes:
+ label: Describe the bug | 简述
+ description: Describe the bug | 简述
+ validations:
+ required: true
+
+ - type: textarea
+ id: logs
+ attributes:
+ label: Screen Shot | 截图
+ description: Screen Shot | 截图
+
+ - type: textarea
+ id: logs
+ attributes:
+ label: Terminal Traceback 终端traceback(如有)
+ description: Terminal Traceback 终端traceback(如有)
+
+ - type: textarea
+ id: logs
+ attributes:
+ label: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)
+ description: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)
+
+
+
+
+
+
+
+
+
+
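
Patch 01 converts the Markdown issue template into a GitHub issue form, but all four textarea blocks share `id: logs`, while GitHub expects each element id to be unique within a form (patch 02 renames them). Below is a minimal pre-push check for this mistake; it assumes PyYAML is available, and the path is the template added by this patch.

```python
# Minimal duplicate-id check for a GitHub issue form (assumes PyYAML:
# pip install pyyaml). Illustrative helper, not part of the patch itself.
import yaml

def find_duplicate_ids(path):
    """Return the id values that appear more than once in the form body."""
    with open(path, encoding="utf-8") as f:
        form = yaml.safe_load(f)
    seen, dupes = set(), set()
    for field in form.get("body", []):
        field_id = field.get("id")
        if field_id is None:
            continue  # ids are optional; only duplicates are a problem
        if field_id in seen:
            dupes.add(field_id)
        seen.add(field_id)
    return sorted(dupes)

if __name__ == "__main__":
    # With the version introduced in this patch, this prints ['logs'].
    print(find_duplicate_ids(".github/ISSUE_TEMPLATE/bug_report.yml"))
```
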
From 111a65e9e8c6c0ebc1e8810ec879f4de2932f59d Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 18:34:55 +0800
Subject: [PATCH 02/26] Update bug_report.yml
---
.github/ISSUE_TEMPLATE/bug_report.yml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 5aa8574..0ed6332 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -18,7 +18,7 @@ body:
required: true
- type: textarea
- id: logs
+ id: describe
attributes:
label: Describe the bug | 简述
description: Describe the bug | 简述
@@ -26,19 +26,19 @@ body:
required: true
- type: textarea
- id: logs
+ id: screenshot
attributes:
label: Screen Shot | 截图
description: Screen Shot | 截图
- type: textarea
- id: logs
+ id: traceback
attributes:
label: Terminal Traceback 终端traceback(如有)
description: Terminal Traceback 终端traceback(如有)
- type: textarea
- id: logs
+ id: material
attributes:
label: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)
description: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)
From f8209e51f5abd2bdd0f2ace23a46ff65b36d7ab1 Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 18:40:35 +0800
Subject: [PATCH 03/26] Update bug_report.yml
---
.github/ISSUE_TEMPLATE/bug_report.yml | 24 ++++--------------------
1 file changed, 4 insertions(+), 20 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 0ed6332..fb00290 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -20,32 +20,16 @@ body:
- type: textarea
id: describe
attributes:
- label: Describe the bug | 简述
- description: Describe the bug | 简述
+ label: Describe the bug & Screen Shot | 简述 与 有帮助的截图
+ description: Describe the bug & Screen Shot | 简述 与 有帮助的截图
validations:
required: true
-
- - type: textarea
- id: screenshot
- attributes:
- label: Screen Shot | 截图
- description: Screen Shot | 截图
- type: textarea
id: traceback
attributes:
- label: Terminal Traceback 终端traceback(如有)
- description: Terminal Traceback 终端traceback(如有)
-
- - type: textarea
- id: material
- attributes:
- label: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)
- description: Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)
-
-
-
-
+ label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
+ description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
From 2dd65af9f0d20d56f8105801ec40fc0477b85c6b Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 18:42:52 +0800
Subject: [PATCH 04/26] Update bug_report.yml
---
.github/ISSUE_TEMPLATE/bug_report.yml | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index fb00290..7a3c51b 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -8,12 +8,14 @@ body:
attributes:
label: Installation Method | 安装方法与平台
options:
- - Pip (我确认使用了最新的requirements.txt安装依赖)
- - Anaconda (我确认使用了最新的requirements.txt安装依赖)
+ - Pip (please confirm: used latest requirements.txt)
+ - Anaconda (please confirm: used latest requirements.txt)
- Docker(Windows/Mac)
- Docker(Linux)
- Docker-Compose(Windows/Mac)
- Docker-Compose(Linux)
+ - Huggingface
+ - Others
validations:
required: true
From 3f251e45713fa79f384a04e4dd3182702ad2b33e Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 18:45:23 +0800
Subject: [PATCH 05/26] Update bug_report.yml
---
.github/ISSUE_TEMPLATE/bug_report.yml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 7a3c51b..4e7a1fc 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -8,14 +8,14 @@ body:
attributes:
label: Installation Method | 安装方法与平台
options:
- - Pip (please confirm: used latest requirements.txt)
- - Anaconda (please confirm: used latest requirements.txt)
+ - Pip Install (I used latest requirements.txt and python>=3.8)
+ - Anaconda (I used latest requirements.txt and python>=3.8)
- Docker(Windows/Mac)
- Docker(Linux)
- Docker-Compose(Windows/Mac)
- Docker-Compose(Linux)
- Huggingface
- - Others
+ - Others (Please Describe)
validations:
required: true
From 777850200deb1933fdc97f16a693f786a973ca22 Mon Sep 17 00:00:00 2001
From: fuqingxu <505030475@qq.com>
Date: Mon, 8 May 2023 19:21:17 +0800
Subject: [PATCH 06/26] update the error handling of moss and chatglm
---
check_proxy.py | 14 +++++++++++---
config.py | 2 +-
docs/waifu_plugin/autoload.js | 7 +++++++
request_llm/bridge_chatglm.py | 6 +++---
request_llm/bridge_moss.py | 14 ++++++++------
5 files changed, 30 insertions(+), 13 deletions(-)
diff --git a/check_proxy.py b/check_proxy.py
index 754b5d3..977802d 100644
--- a/check_proxy.py
+++ b/check_proxy.py
@@ -94,7 +94,7 @@ def get_current_version():
return current_version
-def auto_update():
+def auto_update(raise_error=False):
"""
一键更新协议:查询版本和用户意见
"""
@@ -126,14 +126,22 @@ def auto_update():
try:
patch_and_restart(path)
except:
- print('更新失败。')
+ msg = '更新失败。'
+ if raise_error:
+ from toolbox import trimmed_format_exc
+ msg += trimmed_format_exc()
+ print(msg)
else:
print('自动更新程序:已禁用')
return
else:
return
except:
- print('自动更新程序:已禁用')
+ msg = '自动更新程序:已禁用'
+ if raise_error:
+ from toolbox import trimmed_format_exc
+ msg += trimmed_format_exc()
+ print(msg)
def warm_up_modules():
print('正在执行一些模块的预热...')
diff --git a/config.py b/config.py
index c95e230..2617aff 100644
--- a/config.py
+++ b/config.py
@@ -46,7 +46,7 @@ MAX_RETRY = 2
# OpenAI模型选择是(gpt4现在只对申请成功的人开放,体验gpt-4可以试试api2d)
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing"]
# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
diff --git a/docs/waifu_plugin/autoload.js b/docs/waifu_plugin/autoload.js
index 6922fff..3464a5c 100644
--- a/docs/waifu_plugin/autoload.js
+++ b/docs/waifu_plugin/autoload.js
@@ -16,6 +16,13 @@ try {
live2d_settings['canTakeScreenshot'] = false;
live2d_settings['canTurnToHomePage'] = false;
live2d_settings['canTurnToAboutPage'] = false;
+ live2d_settings['showHitokoto'] = false; // 显示一言
+ live2d_settings['showF12Status'] = false; // 显示加载状态
+ live2d_settings['showF12Message'] = false; // 显示看板娘消息
+ live2d_settings['showF12OpenMsg'] = false; // 显示控制台打开提示
+ live2d_settings['showCopyMessage'] = false; // 显示 复制内容 提示
+ live2d_settings['showWelcomeMessage'] = true; // 显示进入页面欢迎词
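+ // 以上均为看板娘插件(live2d)的显示开关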
+
/* 在 initModel 前添加 */
initModel("file=docs/waifu_plugin/waifu-tips.json");
}});
diff --git a/request_llm/bridge_chatglm.py b/request_llm/bridge_chatglm.py
index 7c86a22..3300286 100644
--- a/request_llm/bridge_chatglm.py
+++ b/request_llm/bridge_chatglm.py
@@ -87,7 +87,7 @@ class GetGLMHandle(Process):
global glm_handle
glm_handle = None
#################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
多线程方法
函数的说明请见 request_llm/bridge_all.py
@@ -95,7 +95,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
global glm_handle
if glm_handle is None:
glm_handle = GetGLMHandle()
- observe_window[0] = load_message + "\n\n" + glm_handle.info
+ if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glm_handle.info
if not glm_handle.success:
error = glm_handle.info
glm_handle = None
@@ -110,7 +110,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
response = ""
for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- observe_window[0] = response
+ if len(observe_window) >= 1: observe_window[0] = response
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("程序终止。")
diff --git a/request_llm/bridge_moss.py b/request_llm/bridge_moss.py
index 06aafb5..a8be91b 100644
--- a/request_llm/bridge_moss.py
+++ b/request_llm/bridge_moss.py
@@ -153,7 +153,8 @@ class GetGLMHandle(Process):
print(response.lstrip('\n'))
self.child.send(response.lstrip('\n'))
except:
- self.child.send('[Local Message] Call MOSS fail.')
+ from toolbox import trimmed_format_exc
+ self.child.send('[Local Message] Call MOSS fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n')
# 请求处理结束,开始下一个循环
self.child.send('[Finish]')
@@ -217,6 +218,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
if not moss_handle.success:
moss_handle = None
return
+ else:
+ response = "[Local Message]: 等待MOSS响应中 ..."
+ chatbot[-1] = (inputs, response)
+ yield from update_ui(chatbot=chatbot, history=history)
if additional_fn is not None:
import core_functional
@@ -231,15 +236,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
history_feedin.append([history[2*i], history[2*i+1]] )
# 开始接收chatglm的回复
- response = "[Local Message]: 等待MOSS响应中 ..."
- chatbot[-1] = (inputs, response)
- yield from update_ui(chatbot=chatbot, history=history)
for response in moss_handle.stream_chat(query=inputs, history=history_feedin, sys_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- chatbot[-1] = (inputs, response)
+ chatbot[-1] = (inputs, response.strip('<|MOSS|>: '))
yield from update_ui(chatbot=chatbot, history=history)
# 总结输出
if response == "[Local Message]: 等待MOSS响应中 ...":
response = "[Local Message]: MOSS响应异常 ..."
- history.extend([inputs, response])
+ history.extend([inputs, response.strip('<|MOSS|>: ')])
yield from update_ui(chatbot=chatbot, history=history)
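
Patch 06 hardens the local-model bridges: `auto_update()` can now surface the trimmed traceback on demand, and the ChatGLM bridge only writes into `observe_window` when it is non-empty, so callers that pass nothing no longer crash. The sketch below restates the `observe_window` convention these guards imply; it is an inference from this diff, not a documented spec.

```python
# observe_window convention inferred from patch 06: slot 0 carries the
# latest partial response for the caller, slot 1 carries the caller's
# heartbeat timestamp, and a stale heartbeat aborts the generation loop.
import time

WATCH_DOG_PATIENCE = 5  # seconds, the value used in bridge_chatglm.py

def stream_with_watchdog(chunks, observe_window=[]):
    response = ""
    for chunk in chunks:
        response += chunk
        if len(observe_window) >= 1:
            observe_window[0] = response  # publish partial output
        if len(observe_window) >= 2:
            if time.time() - observe_window[1] > WATCH_DOG_PATIENCE:
                raise RuntimeError("程序终止。")  # caller stopped feeding the watchdog
    return response

window = ["", time.time()]
print(stream_with_watchdog(["Hello, ", "world"], window))  # -> Hello, world
```

Two side notes on this hunk: the new default `observe_window=[]` is a shared mutable default, but the length guards mean the default list is never written to; and `response.strip('<|MOSS|>: ')` uses `str.strip`'s character-set semantics (it removes any of the characters `<`, `|`, `M`, `O`, `S`, `>`, `:` and space from both ends), so on Python 3.9+ `response.removeprefix('<|MOSS|>: ')` would express literal tag removal exactly.
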
From 84fc8647f7254e5866e562f7e2dfc0cec2067391 Mon Sep 17 00:00:00 2001
From: fuqingxu <505030475@qq.com>
Date: Mon, 8 May 2023 20:06:41 +0800
Subject: [PATCH 07/26] Fix the environment dependencies of moss and chatglm
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
docs/GithubAction+ChatGLM+Moss | 9 ++-------
request_llm/bridge_chatglm.py | 3 ++-
2 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/docs/GithubAction+ChatGLM+Moss b/docs/GithubAction+ChatGLM+Moss
index 85888e2..ece19d6 100644
--- a/docs/GithubAction+ChatGLM+Moss
+++ b/docs/GithubAction+ChatGLM+Moss
@@ -3,7 +3,7 @@
FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
ARG useProxyNetwork=''
RUN apt-get update
-RUN apt-get install -y curl proxychains curl
+RUN apt-get install -y curl proxychains gcc
RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing
@@ -21,12 +21,7 @@ RUN python3 -m pip install -r request_llm/requirements_moss.txt
RUN python3 -m pip install -r request_llm/requirements_chatglm.txt
RUN python3 -m pip install -r request_llm/requirements_newbing.txt
-# # 预热CHATGLM参数(非必要 可选步骤)
-# RUN echo ' \n\
-# from transformers import AutoModel, AutoTokenizer \n\
-# chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) \n\
-# chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() ' >> warm_up_chatglm.py
-# RUN python3 -u warm_up_chatglm.py
+
# 预热Tiktoken模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
diff --git a/request_llm/bridge_chatglm.py b/request_llm/bridge_chatglm.py
index 3300286..100783d 100644
--- a/request_llm/bridge_chatglm.py
+++ b/request_llm/bridge_chatglm.py
@@ -68,7 +68,8 @@ class GetGLMHandle(Process):
# command = self.child.recv()
# if command == '[Terminate]': break
except:
- self.child.send('[Local Message] Call ChatGLM fail.')
+ from toolbox import trimmed_format_exc
+ self.child.send('[Local Message] Call ChatGLM fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n')
# 请求处理结束,开始下一个循环
self.child.send('[Finish]')
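
Patch 07 makes the ChatGLM worker forward the real traceback to the chat window, fenced in ``` so it renders as a code block, matching what patch 06 did for MOSS. A self-contained sketch of the pipe-based pattern follows, using the standard library's `traceback.format_exc()` where the project uses its own `trimmed_format_exc()` helper (which shortens the same output).

```python
# Sketch of the child-process error reporting used by the local-model
# bridges: the worker runs in a separate Process and streams messages,
# including formatted tracebacks, back through a Pipe.
import traceback
from multiprocessing import Process, Pipe

def worker(child):
    try:
        raise ImportError("missing chatglm dependency")  # stand-in failure
    except Exception:
        child.send('[Local Message] Call ChatGLM fail.'
                   + '\n```\n' + traceback.format_exc() + '\n```\n')
    child.send('[Finish]')  # end-of-request marker, as in the bridges

if __name__ == "__main__":
    parent, child = Pipe()
    p = Process(target=worker, args=(child,))
    p.start()
    while True:
        msg = parent.recv()
        if msg == '[Finish]':
            break
        print(msg)  # the fenced traceback renders as a code block in the UI
    p.join()
```
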
From 624d203bbc90204ca8775c03774253ff80658fcf Mon Sep 17 00:00:00 2001
From: fuqingxu <505030475@qq.com>
Date: Mon, 8 May 2023 20:09:54 +0800
Subject: [PATCH 08/26] update docker compose
---
docker-compose.yml | 41 ++++++++++++-----------------------------
1 file changed, 12 insertions(+), 29 deletions(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index 2aa666d..90d5cb5 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,34 +1,30 @@
-【请修改完参数后,删除此行】请在以下方案中选择一种,然后删除其他的方案,最后docker-compose up运行 | Please choose from one of these options below, delete other options as well as This Line
+#【请修改完参数后,删除此行】请在以下方案中选择一种,然后删除其他的方案,最后docker-compose up运行 | Please choose from one of these options below, delete other options as well as This Line
## ===================================================
-## 【方案一】 如果不需要运行本地模型(仅chatgpt类远程服务)
+## 【方案一】 如果不需要运行本地模型(仅chatgpt,newbing类远程服务)
## ===================================================
version: '3'
services:
gpt_academic_nolocalllms:
- image: fuqingxu/gpt_academic:no-local-llms
+ image: ghcr.io/binary-husky/gpt_academic_nolocal:master
environment:
# 请查阅 `config.py` 以查看所有的配置信息
- API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
+ API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
USE_PROXY: ' True '
proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
LLM_MODEL: ' gpt-3.5-turbo '
- AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-4"] '
- DEFAULT_WORKER_NUM: ' 10 '
+ AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "newbing"] '
WEB_PORT: ' 22303 '
ADD_WAIFU: ' True '
- AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '
+ # DEFAULT_WORKER_NUM: ' 10 '
+ # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '
# 与宿主的网络融合
network_mode: "host"
# 不使用代理网络拉取最新代码
command: >
- bash -c " echo '[gpt-academic] 正在从github拉取最新代码...' &&
- git checkout master --force &&
- git remote set-url origin https://github.com/binary-husky/chatgpt_academic.git &&
- git pull &&
- python3 -u main.py"
+ bash -c "python3 -u main.py"
### ===================================================
@@ -37,19 +33,19 @@ services:
version: '3'
services:
gpt_academic_with_chatglm:
- image: fuqingxu/gpt_academic:chatgpt-chatglm-newbing # [option 2] 如果需要运行ChatGLM本地模型
+ image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master
environment:
# 请查阅 `config.py` 以查看所有的配置信息
API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
USE_PROXY: ' True '
proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
LLM_MODEL: ' gpt-3.5-turbo '
- AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-4", "chatglm"] '
+ AVAIL_LLM_MODELS: ' ["chatglm", "moss", "gpt-3.5-turbo", "gpt-4", "newbing"] '
LOCAL_MODEL_DEVICE: ' cuda '
DEFAULT_WORKER_NUM: ' 10 '
WEB_PORT: ' 12303 '
ADD_WAIFU: ' True '
- AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '
+ # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '
# 显卡的使用,nvidia0指第0个GPU
runtime: nvidia
@@ -58,21 +54,8 @@ services:
# 与宿主的网络融合
network_mode: "host"
-
- # 使用代理网络拉取最新代码
- # command: >
- # bash -c " echo '[gpt-academic] 正在从github拉取最新代码...' &&
- # truncate -s -1 /etc/proxychains.conf &&
- # echo \"socks5 127.0.0.1 10880\" >> /etc/proxychains.conf &&
- # proxychains git pull &&
- # python3 -u main.py "
-
- # 不使用代理网络拉取最新代码
command: >
- bash -c " echo '[gpt-academic] 正在从github拉取最新代码...' &&
- git pull &&
- python3 -u main.py"
-
+ bash -c "python3 -u main.py"
### ===================================================
### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型
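
The compose files above pass list-valued settings such as AVAIL_LLM_MODELS as single-quoted strings with padding spaces. The sketch below shows one way such values can be recovered on the Python side with `ast.literal_eval`; this is an illustrative assumption about the config layer, since the project's actual loader is not shown in this diff.

```python
# Hypothetical env-override reader for compose-style string values like
#   AVAIL_LLM_MODELS: ' ["chatglm", "moss", "gpt-3.5-turbo"] '
import ast
import os

def read_env_list(name, default):
    raw = os.environ.get(name)
    if raw is None:
        return default
    value = ast.literal_eval(raw.strip())  # padding spaces in the YAML are harmless
    if not isinstance(value, list):
        raise ValueError(f"{name} must be a list, got {type(value).__name__}")
    return value

os.environ["AVAIL_LLM_MODELS"] = ' ["chatglm", "moss", "gpt-3.5-turbo"] '
print(read_env_list("AVAIL_LLM_MODELS", []))  # ['chatglm', 'moss', 'gpt-3.5-turbo']
```
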
From 88ac4cf0a7c481e7a3adecc18b818d07bcc9ecec Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 20:12:38 +0800
Subject: [PATCH 09/26] Update README.md
---
README.md | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/README.md b/README.md
index addf043..9bd995b 100644
--- a/README.md
+++ b/README.md
@@ -157,14 +157,9 @@ docker run --rm -it -p 50923:50923 gpt-academic
2. ChatGPT+ChatGLM(需要对Docker熟悉 + 读懂Dockerfile + 电脑配置够强)
``` sh
-# 修改Dockerfile
-cd docs && nano Dockerfile+ChatGLM
-# 构建 (Dockerfile+ChatGLM在docs路径下,请先cd docs)
-docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
-# 运行 (1) 直接运行:
-docker run --rm -it --net=host --gpus=all gpt-academic
-# 运行 (2) 我想运行之前进容器做一些调整:
-docker run --rm -it --net=host --gpus=all gpt-academic bash
+1. 修改docker-compose.yml,删除方案一和方案三,保留方案二
+2. 修改docker-compose.yml中方案二的配置,参考其中注释即可
+3. 终端运行 docker-compose up
```
3. ChatGPT + LLAMA + 盘古 + RWKV(需要精通Docker)
From 8f9c5c50394ba61b8b151c879de24a75a601560f Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 20:13:32 +0800
Subject: [PATCH 10/26] Update README.md
---
README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 9bd995b..d1efa4b 100644
--- a/README.md
+++ b/README.md
@@ -154,7 +154,7 @@ docker run --rm -it --net=host gpt-academic
docker run --rm -it -p 50923:50923 gpt-academic
```
-2. ChatGPT+ChatGLM(需要对Docker熟悉 + 读懂Dockerfile + 电脑配置够强)
+2. ChatGPT+ChatGLM+MOSS(需要熟悉Docker)
``` sh
1. 修改docker-compose.yml,删除方案一和方案三,保留方案二
@@ -162,7 +162,7 @@ docker run --rm -it -p 50923:50923 gpt-academic
3. 终端运行 docker-compose up
```
-3. ChatGPT + LLAMA + 盘古 + RWKV(需要精通Docker)
+3. ChatGPT + LLAMA + 盘古 + RWKV(需要熟悉Docker)
``` sh
1. 修改docker-compose.yml,删除方案一和方案二,保留方案三(基于jittor)
2. 修改docker-compose.yml中方案三的配置,参考其中注释即可
From 1bb45d4998be7f14d060631a49afcb744a578ac1 Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 20:16:43 +0800
Subject: [PATCH 11/26] Update docker-compose.yml
---
docker-compose.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index 90d5cb5..9465a62 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -70,7 +70,7 @@ services:
USE_PROXY: ' True '
proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
LLM_MODEL: ' gpt-3.5-turbo '
- AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-4", "jittorllms_rwkv"] '
+ AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "newbing", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] '
LOCAL_MODEL_DEVICE: ' cuda '
DEFAULT_WORKER_NUM: ' 10 '
WEB_PORT: ' 12305 '
From 98269e87082f4df6833102ae39dd00b76239f25f Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 20:21:28 +0800
Subject: [PATCH 12/26] Update README.md
---
README.md | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/README.md b/README.md
index d1efa4b..eca93a9 100644
--- a/README.md
+++ b/README.md
@@ -99,23 +99,20 @@ cd chatgpt_academic
3. 安装依赖
```sh
-# (选择I: 如熟悉python)(python版本3.9以上,越新越好)
+# (选择I: 如熟悉python)(python版本3.9以上,越新越好),备注:使用官方pip源或者阿里pip源,临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt
-# 备注:使用官方pip源或者阿里pip源,其他pip源(如一些大学的pip)有可能出问题,临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-# (选择II: 如不熟悉python)使用anaconda,步骤也是类似的:
-# (II-1)conda create -n gptac_venv python=3.11
-# (II-2)conda activate gptac_venv
-# (II-3)python -m pip install -r requirements.txt
+# (选择II: 如不熟悉python)使用anaconda,步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr):
+conda create -n gptac_venv python=3.11
+conda activate gptac_venv
+python -m pip install -r requirements.txt
```
【非必要可选步骤】如果需要支持清华ChatGLM/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
```sh
# 【非必要可选步骤I】支持清华ChatGLM
python -m pip install -r request_llm/requirements_chatglm.txt
-## 清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下:
-## 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda
-## 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+## 清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
# 【非必要可选步骤II】支持复旦MOSS
python -m pip install -r request_llm/requirements_moss.txt
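
The consolidated ChatGLM note above boils down to two remedies: reinstall torch+cuda for GPU use, or switch both `from_pretrained` calls to the quantized `THUDM/chatglm-6b-int4` checkpoint. A small sketch of that fallback follows; the `from_pretrained` calls are the ones quoted in the README, while the wrapper function itself is illustrative.

```python
# Precision fallback described in the README note: try the full chatglm-6b
# checkpoint first, fall back to the int4 quantized one on weaker machines.
# Assumes request_llm/requirements_chatglm.txt has been installed.
from transformers import AutoModel, AutoTokenizer

def load_chatglm(prefer_full=True):
    candidates = ["THUDM/chatglm-6b", "THUDM/chatglm-6b-int4"]
    if not prefer_full:
        candidates.reverse()
    last_error = None
    for repo in candidates:
        try:
            tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
            # .float() keeps the model on CPU; with torch+cuda use .half().cuda()
            model = AutoModel.from_pretrained(repo, trust_remote_code=True).float()
            return tokenizer, model
        except Exception as e:  # out-of-memory or download failure
            last_error = e
    raise RuntimeError("ChatGLM could not be loaded") from last_error

# tokenizer, model = load_chatglm()
```
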
From 397dc2d0dc2530fd1f00eb8c114aaae435a7fae0 Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 20:22:43 +0800
Subject: [PATCH 13/26] Update README.md
---
README.md | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index eca93a9..aa8fe36 100644
--- a/README.md
+++ b/README.md
@@ -110,16 +110,15 @@ python -m pip install -r requirements.txt
【非必要可选步骤】如果需要支持清华ChatGLM/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
```sh
-# 【非必要可选步骤I】支持清华ChatGLM
+# 【非必要可选步骤I】支持清华ChatGLM。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt
-## 清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
# 【非必要可选步骤II】支持复旦MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # 注意执行此行代码时,必须处于项目根路径
# 【非必要可选步骤III】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```
4. 运行
From 00e7fbd7fab6a0a02634712ec2fd49f5431b87a3 Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Mon, 8 May 2023 20:27:18 +0800
Subject: [PATCH 14/26] Update README.md
---
README.md | 22 +++++++++++++++-------
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/README.md b/README.md
index aa8fe36..1558c34 100644
--- a/README.md
+++ b/README.md
@@ -103,24 +103,32 @@ cd chatgpt_academic
python -m pip install -r requirements.txt
# (选择II: 如不熟悉python)使用anaconda,步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11
-conda activate gptac_venv
-python -m pip install -r requirements.txt
+conda create -n gptac_venv python=3.11 # 创建anaconda环境
+conda activate gptac_venv # 激活anaconda环境
+python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤
```
-【非必要可选步骤】如果需要支持清华ChatGLM/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
+
+
+【可选步骤】如果需要支持清华ChatGLM/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
```sh
-# 【非必要可选步骤I】支持清华ChatGLM。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+# 【可选步骤I】支持清华ChatGLM。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt
-# 【非必要可选步骤II】支持复旦MOSS
+# 【可选步骤II】支持复旦MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # 注意执行此行代码时,必须处于项目根路径
-# 【非必要可选步骤III】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案):
+# 【可选步骤III】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```
+如果需要支持清华ChatGLM/复旦MOSS作为后端,请点击展开此处
+
+8. OpenAI图像生成
+