Compare commits
3 Commits
pip_core ... enable_cle
| Author | SHA1 | Date |
|---|---|---|
|  | 155a7e1174 |  |
|  | 86e33ea99a |  |
|  | e4ba0e6c85 |  |
@@ -116,7 +116,7 @@ python -m pip install -r requirements.txt # this step is the same as a standard pip install
```


-<details><summary>If you need Tsinghua ChatGLM2 / Fudan MOSS / RWKV as the backend, click here to expand</summary>
+<details><summary>If you need Tsinghua ChatGLM2 / Fudan MOSS as the backend, click here to expand</summary>
<p>

[Optional step] To support Tsinghua ChatGLM2 / Fudan MOSS as the backend, additional dependencies must be installed (prerequisites: familiar with Python + have used PyTorch + a sufficiently powerful machine):

@@ -128,10 +128,7 @@ python -m pip install -r request_llm/requirements_chatglm.txt
python -m pip install -r request_llm/requirements_moss.txt
git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss # note: this command must be run from the project root directory

-# [Optional step III] Support RWKV Runner
-Refer to the wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
-
-# [Optional step IV] Make sure AVAIL_LLM_MODELS in config.py contains the desired models; all currently supported models are listed below (the jittorllms series is currently only supported via the docker solution):
+# [Optional step III] Make sure AVAIL_LLM_MODELS in config.py contains the desired models; all currently supported models are listed below (the jittorllms series is currently only supported via the docker solution):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```

@@ -19,7 +19,7 @@ def get_core_functions():
            # whether the button is visible (default True, i.e. visible)
            "Visible": True,
            # whether to clear the history when triggered (default False, i.e. keep the previous conversation)
-            "AutoClearHistory": True
+            "AutoClearHistory": False
        },
        "中文学术润色": {
            "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +

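For orientation, here is a minimal sketch (not part of the diff) of the shape of one entry returned by get_core_functions(), using only the key names that appear in these hunks (Prefix, Suffix, Visible, AutoClearHistory); the prompt text is abbreviated and the wrapper name get_core_functions_example is hypothetical.

```python
# Illustrative sketch, not repository code: one entry of the dict returned by
# get_core_functions(). Key names mirror those used in the hunks on this page.
def get_core_functions_example():
    return {
        "中文学术润色": {
            "Prefix": "作为一名中文学术论文写作改进助理,...",  # prepended to the user's input (abbreviated)
            "Suffix": "",                # appended to the user's input
            "Visible": True,             # whether the button is shown in the UI (default True)
            "AutoClearHistory": False,   # clear the previous conversation when triggered (default False)
        },
    }
```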
@@ -83,11 +83,12 @@ def get_core_functions():
    }


-def handle_core_functionality(additional_fn, inputs, history):
+def handle_core_functionality(additional_fn, inputs, history, chatbot):
    import core_functional
    importlib.reload(core_functional)    # hot-reload the prompts
    core_functional = core_functional.get_core_functions()
    if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)   # apply the pre-processing function (if any)
    inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
-    history = [] if core_functional[additional_fn].get("AutoClearHistory", False) else history
+    if core_functional[additional_fn].get("AutoClearHistory", False):
+        history = []
    return inputs, history

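The remaining hunks below all apply the same mechanical change to the request_llm bridge modules: each predict() now forwards its chatbot object as the fourth argument. A minimal sketch of that call pattern follows; the wrapper name apply_core_function is hypothetical, and the variables are those predict() already holds.

```python
# Hypothetical helper illustrating the call pattern repeated in every bridge hunk below.
def apply_core_function(additional_fn, inputs, history, chatbot):
    if additional_fn is not None:
        from core_functional import handle_core_functionality
        # chatbot is now forwarded; if the selected entry sets "AutoClearHistory": True,
        # history comes back as an empty list.
        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
    return inputs, history
```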
@@ -145,7 +145,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    # process the conversation history
    history_feedin = []

@@ -186,7 +186,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    # process the conversation history
    history_feedin = []

@@ -130,7 +130,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    raw_input = inputs
    logging.info(f'[raw_input] {raw_input}')

@@ -117,7 +117,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    raw_input = inputs
    logging.info(f'[raw_input] {raw_input}')

@@ -291,7 +291,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    # process the conversation history
    history_feedin = []

@@ -155,7 +155,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    # process the conversation history
    history_feedin = []

@@ -155,7 +155,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    # process the conversation history
    history_feedin = []

@@ -155,7 +155,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    # process the conversation history
    history_feedin = []

@@ -225,7 +225,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    # process the conversation history
    history_feedin = []

@@ -225,7 +225,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    history_feedin = []
    for i in range(len(history)//2):

@@ -249,7 +249,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    history_feedin = []
    for i in range(len(history)//2):

@@ -97,7 +97,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
    """
    if additional_fn is not None:
        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history)
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    raw_input = "What I would like to say is the following: " + inputs
    history.extend([inputs, ""])

setup.py (50 lines removed)
@@ -1,50 +0,0 @@
-import setuptools, glob, os, fnmatch
-
-with open("README.md", "r", encoding="utf-8") as fh:
-    long_description = fh.read()
-
-
-def _process_requirements():
-    packages = open('requirements.txt').read().strip().split('\n')
-    requires = []
-    for pkg in packages:
-        if pkg.startswith('git+ssh'):
-            return_code = os.system('pip install {}'.format(pkg))
-            assert return_code == 0, 'error, status_code is: {}, exit!'.format(return_code)
-        if pkg.startswith('./docs'):
-            continue
-        else:
-            requires.append(pkg)
-    return requires
-
-def package_files(directory):
-    import subprocess
-    list_of_files = subprocess.check_output("git ls-files", shell=True).splitlines()
-    return [str(k) for k in list_of_files]
-
-extra_files = package_files('./')
-
-setuptools.setup(
-    name="void-terminal",
-    version="0.0.0",
-    author="Qingxu",
-    author_email="505030475@qq.com",
-    description="LLM based APIs",
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    url="https://github.com/binary-husky/gpt-academic",
-    project_urls={
-        "Bug Tracker": "https://github.com/binary-husky/gpt-academic/issues",
-    },
-    classifiers=[
-        "Programming Language :: Python :: 3",
-        "License :: OSI Approved :: MIT License",
-        "Operating System :: OS Independent",
-    ],
-    package_dir={"": "."},
-    package_data={"": extra_files},
-    include_package_data=True,
-    packages=setuptools.find_packages(where="."),
-    python_requires=">=3.9",
-    install_requires=_process_requirements(),
-)