Compare commits

..

11 Commits

16 changed files with 277 additions and 191 deletions


@ -1,38 +0,0 @@
name: Build Image

on:
  workflow_dispatch:
    inputs:
      release_tag:
        description: 'Tag for the images'
        required: true

env:
  REGISTRY: registry.cn-hongkong.aliyuncs.com
  NAMESPACE: chatwithpaper
  IMAGE: academic
  TAG: ${{ github.event.inputs.release_tag || github.event.client_payload.release_tag }}

jobs:
  build:
    runs-on: ubuntu-latest
    environment: production
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Login to Registry
        uses: docker/login-action@v2.1.0
        with:
          registry: "${{ env.REGISTRY }}"
          username: "${{ secrets.ACR_USER }}"
          password: "${{ secrets.ACR_PASSWORD }}"

      - name: Build and push image
        uses: docker/build-push-action@v4
        with:
          context: .
          file: docs/Dockerfile+NoLocal+Latex
          tags: ${{ env.REGISTRY }}/${{ env.NAMESPACE }}/${{ env.IMAGE }}:${{ env.TAG }}
          push: true


@ -0,0 +1,44 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: Create and publish a Docker image for ChatGLM support

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_chatglm_moss

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+ChatGLM+Moss
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}


@ -0,0 +1,44 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: Create and publish a Docker image for ChatGLM support

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_jittorllms

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+JittorLLMs
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}


@ -0,0 +1,44 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: Create and publish a Docker image

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_nolocal

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+NoLocal
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}


@ -47,7 +47,7 @@ MAX_RETRY = 2
 # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 同时它必须被包含在AVAIL_LLM_MODELS切换列表中 )
 LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt35", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "newbing-free", "stack-claude"]
 # P.S. 其他可用的模型还包括 ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
 # 本地LLM模型如ChatGLM的执行方式 CPU/GPU
@ -56,6 +56,9 @@ LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
 # 设置gradio的并行线程数不需要修改
 CONCURRENT_COUNT = 100
+# 是否在提交时自动清空输入框
+AUTO_CLEAR_TXT = False
 # 加一个live2d装饰
 ADD_WAIFU = False
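
Both new settings are consumed through the project's `get_conf` helper; the `main.py` hunk further down reads `AUTO_CLEAR_TXT` in exactly that way. A minimal sketch of the same pattern (the surrounding script is assumed for illustration):

```
from toolbox import get_conf

# get_conf returns one value per requested key, as in the main.py hunk below
AUTO_CLEAR_TXT, AVAIL_LLM_MODELS = get_conf('AUTO_CLEAR_TXT', 'AVAIL_LLM_MODELS')

if AUTO_CLEAR_TXT:
    print("input boxes will be cleared automatically on submit")
print(f"{len(AVAIL_LLM_MODELS)} models can be selected from the dropdown")
```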


@ -42,7 +42,6 @@ def get_core_functions():
"中译英": { "中译英": {
"Prefix": r"Please translate following sentence to English:" + "\n\n", "Prefix": r"Please translate following sentence to English:" + "\n\n",
"Suffix": r"", "Suffix": r"",
"Visible": False,
}, },
"学术中英互译": { "学术中英互译": {
"Prefix": r"I want you to act as a scientific English-Chinese translator, " + "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
@ -75,6 +74,5 @@ def get_core_functions():
r"Note that, reference styles maybe more than one kind, you should transform each item correctly." + r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
r"Items need to be transformed:", r"Items need to be transformed:",
"Suffix": r"", "Suffix": r"",
"Visible": False,
} }
} }
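
Removing `"Visible": False` from these two entries lets them reappear in the UI. The flag is honored by the button-building loop in `main.py`, which shows up unchanged later in this diff; restated minimally:

```
for k in functional:
    # entries marked "Visible": False are skipped and never get a button
    if ("Visible" in functional[k]) and (not functional[k]["Visible"]):
        continue
    # the remaining entries are rendered as buttons in the 基础功能区
```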


@ -26,7 +26,6 @@ def get_crazy_functions():
     from crazy_functions.对话历史存档 import 删除所有本地对话历史记录
     from crazy_functions.批量Markdown翻译 import Markdown英译中
     function_plugins = {
         "解析整个Python项目": {
             "Color": "stop",    # 按钮颜色
@ -48,10 +47,10 @@ def get_crazy_functions():
"AdvancedArgs": True, # 调用时唤起高级参数输入区默认False "AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
"ArgsReminder": "若输入0则不解析notebook中的Markdown块", # 高级参数输入区的显示提示 "ArgsReminder": "若输入0则不解析notebook中的Markdown块", # 高级参数输入区的显示提示
}, },
# "批量总结Word文档": { "批量总结Word文档": {
# "Color": "stop", "Color": "stop",
# "Function": HotReload(总结word文档) "Function": HotReload(总结word文档)
# }, },
"解析整个C++项目头文件": { "解析整个C++项目头文件": {
"Color": "stop", # 按钮颜色 "Color": "stop", # 按钮颜色
"AsButton": False, # 加入下拉菜单中 "AsButton": False, # 加入下拉菜单中
@ -109,10 +108,10 @@ def get_crazy_functions():
"保存当前的对话": { "保存当前的对话": {
"Function": HotReload(对话历史存档) "Function": HotReload(对话历史存档)
}, },
# "[多线程Demo] 解析此项目本身(源码自译解)": { "[多线程Demo] 解析此项目本身(源码自译解)": {
# "AsButton": False, # 加入下拉菜单中 "AsButton": False, # 加入下拉菜单中
# "Function": HotReload(解析项目本身) "Function": HotReload(解析项目本身)
# }, },
# "[老旧的Demo] 把本项目源代码切换成全英文": { # "[老旧的Demo] 把本项目源代码切换成全英文": {
# # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效 # # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
# "AsButton": False, # 加入下拉菜单中 # "AsButton": False, # 加入下拉菜单中
@ -138,15 +137,15 @@ def get_crazy_functions():
         from crazy_functions.批量Markdown翻译 import Markdown中译英
         function_plugins.update({
-            "本地PDF全文翻译": {
+            "批量翻译PDF文档多线程": {
                 "Color": "stop",
                 "AsButton": True,  # 加入下拉菜单中
                 "Function": HotReload(批量翻译PDF文档)
             },
-            # "询问多个GPT模型": {
-            #     "Color": "stop",    # 按钮颜色
-            #     "Function": HotReload(同时问询)
-            # },
+            "询问多个GPT模型": {
+                "Color": "stop",    # 按钮颜色
+                "Function": HotReload(同时问询)
+            },
             "[测试功能] 批量总结PDF文档": {
                 "Color": "stop",
                 "AsButton": False, # 加入下拉菜单中
@ -223,57 +222,54 @@ def get_crazy_functions():
         })
     except:
         print('Load function plugin failed')

-    from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
-    function_plugins.update({
-        "ArXiv Latex一键翻译输入区给定arXiv ID": {
-            "Color": "stop",
-            "AsButton": True,
-            "AdvancedArgs": True,
-            "ArgsReminder":
-                "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
-                "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
-            "Function": HotReload(Latex翻译中文并重新编译PDF)
-        }
-    })
-    # try:
-    #     from crazy_functions.联网的ChatGPT import 连接网络回答问题
-    #     function_plugins.update({
-    #         "连接网络回答问题(先输入问题,再点击按钮,需要访问谷歌)": {
-    #             "Color": "stop",
-    #             "AsButton": False, # 加入下拉菜单中
-    #             "Function": HotReload(连接网络回答问题)
-    #         }
-    #     })
-    # except:
-    #     print('Load function plugin failed')
+    try:
+        from crazy_functions.联网的ChatGPT import 连接网络回答问题
+        function_plugins.update({
+            "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
+                "Color": "stop",
+                "AsButton": False, # 加入下拉菜单中
+                "Function": HotReload(连接网络回答问题)
+            }
+        })
+        from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
+        function_plugins.update({
+            "连接网络回答问题中文Bing版输入问题后点击该插件": {
+                "Color": "stop",
+                "AsButton": False, # 加入下拉菜单中
+                "Function": HotReload(连接bing搜索回答问题)
+            }
+        })
+    except:
+        print('Load function plugin failed')

-    # try:
-    #     from crazy_functions.解析项目源代码 import 解析任意code项目
-    #     function_plugins.update({
-    #         "解析项目源代码(手动指定和筛选源代码文件类型)": {
-    #             "Color": "stop",
-    #             "AsButton": False,
-    #             "AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
-    #             "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
-    #             "Function": HotReload(解析任意code项目)
-    #         },
-    #     })
-    # except:
-    #     print('Load function plugin failed')
+    try:
+        from crazy_functions.解析项目源代码 import 解析任意code项目
+        function_plugins.update({
+            "解析项目源代码(手动指定和筛选源代码文件类型)": {
+                "Color": "stop",
+                "AsButton": False,
+                "AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
+                "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
+                "Function": HotReload(解析任意code项目)
+            },
+        })
+    except:
+        print('Load function plugin failed')

-    # try:
-    #     from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
-    #     function_plugins.update({
-    #         "询问多个GPT模型(手动指定询问哪些模型)": {
-    #             "Color": "stop",
-    #             "AsButton": False,
-    #             "AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
-    #             "ArgsReminder": "支持任意数量的llm接口用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
-    #             "Function": HotReload(同时问询_指定模型)
-    #         },
-    #     })
-    # except:
-    #     print('Load function plugin failed')
+    try:
+        from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
+        function_plugins.update({
+            "询问多个GPT模型(手动指定询问哪些模型)": {
+                "Color": "stop",
+                "AsButton": False,
+                "AdvancedArgs": True, # 调用时唤起高级参数输入区默认False
+                "ArgsReminder": "支持任意数量的llm接口用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
+                "Function": HotReload(同时问询_指定模型)
+            },
+        })
+    except:
+        print('Load function plugin failed')

     try:
         from crazy_functions.图片生成 import 图片生成
@ -368,18 +364,29 @@ def get_crazy_functions():
"Function": HotReload(Latex英文纠错加PDF对比) "Function": HotReload(Latex英文纠错加PDF对比)
} }
}) })
from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
# function_plugins.update({ function_plugins.update({
# "本地论文翻译上传Latex压缩包 [需Latex]": { "Arixv翻译输入arxivID[需Latex]": {
# "Color": "stop", "Color": "stop",
# "AsButton": False, "AsButton": False,
# "AdvancedArgs": True, "AdvancedArgs": True,
# "ArgsReminder": "ArgsReminder":
# "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+ "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
# "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ', "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
# "Function": HotReload(Latex翻译中文并重新编译PDF) "Function": HotReload(Latex翻译中文并重新编译PDF)
# } }
# }) })
function_plugins.update({
"本地论文翻译上传Latex压缩包[需Latex]": {
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder":
"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Function": HotReload(Latex翻译中文并重新编译PDF)
}
})
except: except:
print('Load function plugin failed') print('Load function plugin failed')
@ -397,18 +404,4 @@ def get_crazy_functions():
     # except:
     #     print('Load function plugin failed')

-    # try:
-    #     from crazy_functions.虚空终端 import 终端
-    #     function_plugins.update({
-    #         "超级终端": {
-    #             "Color": "stop",
-    #             "AsButton": False,
-    #             # "AdvancedArgs": True,
-    #             # "ArgsReminder": "",
-    #             "Function": HotReload(终端)
-    #         }
-    #     })
-    # except:
-    #     print('Load function plugin failed')
     return function_plugins
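
Every plugin registered in this file follows the same descriptor shape: a display name mapped to `Color`, `AsButton`, `AdvancedArgs`, `ArgsReminder` and a `HotReload`-wrapped `Function`. A hedged sketch of adding one more entry in that style; the module and function names here are hypothetical placeholders:

```
try:
    from crazy_functions.示例插件 import 示例插件主函数  # hypothetical plugin module
    function_plugins.update({
        "示例插件(演示用)": {
            "Color": "stop",        # button color
            "AsButton": False,      # show in the dropdown instead of as a button
            "AdvancedArgs": False,  # no advanced-argument textbox needed
            "Function": HotReload(示例插件主函数)  # HotReload re-imports the module on every call
        }
    })
except:
    print('Load function plugin failed')
```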


@ -3,9 +3,7 @@ from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip
 from functools import partial
 import glob, os, requests, time
 pj = os.path.join
-# ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
-# ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
-ARXIV_CACHE_DIR = os.getenv("Arxiv_Cache")
+ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
 # =================================== 工具函数 ===============================================
 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
@ -192,9 +190,9 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
     # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
-    if not os.path.exists(project_folder + '/merge_proofread.tex'):
+    if not os.path.exists(project_folder + '/merge_proofread_en.tex'):
         yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
-                                        chatbot, history, system_prompt, mode='proofread_latex', switch_prompt=switch_prompt)
+                                        chatbot, history, system_prompt, mode='proofread_en', switch_prompt=_switch_prompt_)

     # <-------------- compile PDF ------------->


@ -195,7 +195,7 @@ def test_Latex():
     # txt = r"https://arxiv.org/abs/2303.08774"
     # txt = r"https://arxiv.org/abs/2303.12712"
     # txt = r"C:\Users\fuqingxu\arxiv_cache\2303.12712\workfolder"
-    txt = r"C:\Users\fuqingxu\Desktop\9"
+    txt = r"2306.17157" # 这个paper有个input命令文件名大小写错误

     for cookies, cb, hist, msg in (Latex翻译中文并重新编译PDF)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):


@ -189,6 +189,18 @@ def rm_comments(main_file):
     main_file = re.sub(r'(?<!\\)%.*', '', main_file) # 使用正则表达式查找半行注释, 并替换为空字符串
     return main_file

+def find_tex_file_ignore_case(fp):
+    dir_name = os.path.dirname(fp)
+    base_name = os.path.basename(fp)
+    if not base_name.endswith('.tex'): base_name+='.tex'
+    if os.path.exists(pj(dir_name, base_name)): return pj(dir_name, base_name)
+    # go case in-sensitive
+    import glob
+    for f in glob.glob(dir_name+'/*.tex'):
+        base_name_s = os.path.basename(fp)
+        if base_name_s.lower() == base_name.lower(): return f
+    return None
+
 def merge_tex_files_(project_foler, main_file, mode):
     """
     Merge Tex project recrusively
@ -197,14 +209,11 @@ def merge_tex_files_(project_foler, main_file, mode):
     for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
         f = s.group(1)
         fp = os.path.join(project_foler, f)
-        if os.path.exists(fp):
-            # e.g., \input{srcs/07_appendix.tex}
-            with open(fp, 'r', encoding='utf-8', errors='replace') as fx:
-                c = fx.read()
+        fp = find_tex_file_ignore_case(fp)
+        if fp:
+            with open(fp, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
         else:
-            # e.g., \input{srcs/07_appendix}
-            with open(fp+'.tex', 'r', encoding='utf-8', errors='replace') as fx:
-                c = fx.read()
+            raise RuntimeError(f'找不到{fp}Tex源文件缺失')
         c = merge_tex_files_(project_foler, c, mode)
         main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
     return main_file
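
With `find_tex_file_ignore_case`, an `\input{...}` reference can now resolve even when the on-disk file name differs in case or omits the `.tex` extension. A small illustrative call; the paths here are assumed for the example:

```
# e.g. the project contains srcs/07_Appendix.tex but the paper writes \input{srcs/07_appendix}
fp = find_tex_file_ignore_case("workfolder/srcs/07_appendix")
if fp:
    with open(fp, 'r', encoding='utf-8', errors='replace') as fx:
        content = fx.read()
else:
    raise RuntimeError("missing Tex source file")
```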
@ -449,10 +458,9 @@ class LatexPaperSplit():
""" """
def __init__(self) -> None: def __init__(self) -> None:
self.nodes = None self.nodes = None
self.msg = "{\\scriptsize\\textbf{警告该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成" + \ self.msg = "*{\\scriptsize\\textbf{警告该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成" + \
"版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \ "版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \
"项目Github地址: \\url{https://github.com/binary-husky/gpt_academic/}。" + \ "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
"项目在线体验地址: \\url{https://chatpaper.org}。"
# 请您不要删除或修改这行警告除非您是论文的原作者如果您是论文原作者欢迎加REAME中的QQ联系开发者 # 请您不要删除或修改这行警告除非您是论文的原作者如果您是论文原作者欢迎加REAME中的QQ联系开发者
self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\" self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"


@ -27,8 +27,10 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"):
     }
     response = requests.post(url, headers=headers, json=data, proxies=proxies)
     print(response.content)
-    image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
+    try:
+        image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
+    except:
+        raise RuntimeError(response.content.decode())
     # 文件保存到本地
     r = requests.get(image_url, proxies=proxies)
     file_path = 'gpt_log/image_gen/'


@ -96,6 +96,15 @@
 ● 部署名(不是模型名)

+# 修改 config.py
+```
+AZURE_ENDPOINT = "填入终结点"
+AZURE_API_KEY = "填入azure openai api的密钥"
+AZURE_API_VERSION = "2023-05-15" # 默认使用 2023-05-15 版本,无需修改
+AZURE_ENGINE = "填入部署名"
+```
+
 # API的使用
 接下来就是具体怎么使用API了还是可以参考官方文档[快速入门 - 开始通过 Azure OpenAI 服务使用 ChatGPT 和 GPT-4 - Azure OpenAI Service | Microsoft Learn](https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/chatgpt-quickstart?pivots=programming-language-python)
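
For reference, a minimal sketch of calling a deployment with the settings above through the `openai` Python package (pre-1.0 API style, as in the linked quickstart); the variables are the placeholders from the config block:

```
import openai

openai.api_type = "azure"
openai.api_base = AZURE_ENDPOINT        # endpoint from config.py
openai.api_version = AZURE_API_VERSION  # e.g. "2023-05-15"
openai.api_key = AZURE_API_KEY

# Azure routes requests by deployment name ("engine"), not by model name
response = openai.ChatCompletion.create(
    engine=AZURE_ENGINE,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response["choices"][0]["message"]["content"])
```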

main.py

@ -1,5 +1,4 @@
 import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
-from pathlib import Path

 def main():
@ -7,8 +6,8 @@ def main():
     from request_llm.bridge_all import predict
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
     # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
-    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \
-        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS')
+    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = \
+        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@ -16,7 +15,7 @@ def main():
     from check_proxy import get_current_version
     initial_prompt = "Serve me as a writing and programming assistant."
-    title_html = f"<h1 align=\"center\">ChatGPT 学术优化 网页测试版 {get_current_version()}</h1>"
+    title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
     description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
     # 问询记录, python 版本建议3.9+(越新越好)
@ -53,21 +52,7 @@ def main():
         CHATBOT_HEIGHT /= 2
     cancel_handles = []
-    # Read your Baidu statistics code from the file
-    baidu_stats_code = Path('./sites/baidu_stats.html').read_text()
     with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
-        # Insert your Baidu statistics code here
-        gradio_original_template_fn = gr.routes.templates.TemplateResponse
-        def gradio_new_template_fn(*args, **kwargs):
-            res = gradio_original_template_fn(*args, **kwargs)
-            res.body = res.body.replace(b'</html>', f'{baidu_stats_code}</html>'.encode("utf8"))
-            res.init_headers()
-            return res
-        gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
-        # Insert Title
         gr.HTML(title_html)
         cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
         with gr_L1():
@ -86,13 +71,7 @@ def main():
stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm") stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm") clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm")
with gr.Row(): with gr.Row():
status = gr.Markdown(f"""Tips: 1. 按Enter提交, 按Shift+Enter换行2. 当前模型: {LLM_MODEL} \n {proxy_info}. status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行当前模型: {LLM_MODEL} \n {proxy_info}")
3. 请注意隐私保护和遵守法律法规;
4. 请勿使用本服务进行违法犯罪活动;
5. 我和qingxu都希望能够为大家提供一个好的**学术工具**,希望大家不要攻击和滥用本服务;
6. 本服务还存在各种bug如果发现bug欢迎加群反馈或者发issue告诉我们
7. 希望大家能结合ChatPaper的速读找到需要精读的再用本工具的全文翻译实现快速知识摄取。
""")
with gr.Accordion("基础功能区", open=True) as area_basic_fn: with gr.Accordion("基础功能区", open=True) as area_basic_fn:
with gr.Row(): with gr.Row():
for k in functional: for k in functional:
@ -125,7 +104,7 @@ def main():
                     system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
                     top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
                     temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
-                    max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="Local LLM MaxLength",)
+                    max_length_sl = gr.Slider(minimum=256, maximum=8192, value=4096, step=1, interactive=True, label="Local LLM MaxLength",)
                     checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
                     md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
@ -165,6 +144,11 @@ def main():
         resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
         clearBtn.click(lambda: ("",""), None, [txt, txt2])
         clearBtn2.click(lambda: ("",""), None, [txt, txt2])
+        if AUTO_CLEAR_TXT:
+            submitBtn.click(lambda: ("",""), None, [txt, txt2])
+            submitBtn2.click(lambda: ("",""), None, [txt, txt2])
+            txt.submit(lambda: ("",""), None, [txt, txt2])
+            txt2.submit(lambda: ("",""), None, [txt, txt2])
         # 基础功能区的回调函数注册
         for k in functional:
             if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
@ -188,7 +172,6 @@ def main():
                 ret.update({plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")})
             return ret
         dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] )
         def on_md_dropdown_changed(k):
             return {chatbot: gr.update(label="当前模型:"+k)}
         md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] )
@ -202,7 +185,6 @@ def main():
         # 终止按钮的回调函数注册
         stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
         stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
-        demo.load(on_dropdown_changed, inputs=gr.State("ArXiv Latex一键翻译输入区给定arXiv ID"), outputs=[switchy_bt, plugin_advanced_arg])
     # gradio的inbrowser触发不太稳定回滚代码到原始的浏览器打开函数
     def auto_opentab_delay():


@ -152,7 +152,7 @@ model_info = {
"token_cnt": get_token_num_gpt4, "token_cnt": get_token_num_gpt4,
}, },
# chatglm # chatglm 直接对齐到 chatglm2
"chatglm": { "chatglm": {
"fn_with_ui": chatglm_ui, "fn_with_ui": chatglm_ui,
"fn_without_ui": chatglm_noui, "fn_without_ui": chatglm_noui,
@ -161,6 +161,15 @@ model_info = {
"tokenizer": tokenizer_gpt35, "tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35, "token_cnt": get_token_num_gpt35,
}, },
"chatglm2": {
"fn_with_ui": chatglm_ui,
"fn_without_ui": chatglm_noui,
"endpoint": None,
"max_token": 1024,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
# newbing # newbing
"newbing": { "newbing": {
"fn_with_ui": newbing_ui, "fn_with_ui": newbing_ui,


@ -40,12 +40,12 @@ class GetGLMHandle(Process):
         while True:
             try:
                 if self.chatglm_model is None:
-                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
                     device, = get_conf('LOCAL_MODEL_DEVICE')
                     if device=='cpu':
-                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).float()
                     else:
-                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
                     self.chatglm_model = self.chatglm_model.eval()
                     break
                 else:


@ -1,10 +0,0 @@
<!-- baidu_stats.html -->
<script>
var _hmt = _hmt || [];
(function() {
var hm = document.createElement("script");
hm.src = "https://hm.baidu.com/hm.js?208673d55832a94b9bbe10b1f4e70c09";
var s = document.getElementsByTagName("script")[0];
s.parentNode.insertBefore(hm, s);
})();
</script>