Compare commits: huggingfac...chatpaper-
196 Commits
| SHA1 | Author | Date | |
|---|---|---|---|
| 066b5d4d29 | |||
| 2373348c9e | |||
| 3f8a145c1d | |||
| 22f377e2fb | |||
| 37172906ef | |||
| 3b78e0538b | |||
| d8f9ac71d0 | |||
| aced272d3c | |||
| aff77a086d | |||
| 49253c4dc6 | |||
| 1a00093015 | |||
| 64f76e7401 | |||
| eb4c07997e | |||
| 99cf7205c3 | |||
| d684b4cdb3 | |||
| 601a95c948 | |||
| e18bef2e9c | |||
| f654c1af31 | |||
| 146fde30b8 | |||
| e90048a671 | |||
| ea624b1510 | |||
| 057e3dda3c | |||
| e79dcb1b48 | |||
| 0aadeabccc | |||
| 4290821a50 | |||
| 280e14d7b7 | |||
| 9f0cf9fb2b | |||
| b8560b7510 | |||
| d841d13b04 | |||
| efda9e5193 | |||
| 4bc073b072 | |||
| 756bd29f0c | |||
| 66c9e9a3cf | |||
| ca49af1e53 | |||
| 78df094eb9 | |||
| 33d2e75aac | |||
| 74941170aa | |||
| cd38949903 | |||
| d87f1eb171 | |||
| cd1e4e1ba7 | |||
| cf5f348d70 | |||
| 0ee25f475e | |||
| 1fede6df7f | |||
| 22a65cd163 | |||
| 538b041ea3 | |||
| d7b056576d | |||
| cb0bb6ab4a | |||
| bf955aaf12 | |||
| b24e664a85 | |||
| af3a1901a0 | |||
| 8affcd92a9 | |||
| d83e0a7704 | |||
| 61eb0da861 | |||
| 5da633d94d | |||
| f3e4e26e2f | |||
| 78c53b6bec | |||
| 84e09766cd | |||
| a84f4f43bf | |||
| cb7f6984a2 | |||
| 5703beb06b | |||
| fcb0f466b9 | |||
| af7734dd35 | |||
| 1b31d2e0d5 | |||
| d5bab093f9 | |||
| f94b167dc2 | |||
| 951d5ec758 | |||
| 016d8ee156 | |||
| dca9ec4bae | |||
| a06e43c96b | |||
| 29c6bfb6cb | |||
| 8d7ee975a0 | |||
| 4bafbb3562 | |||
| 7fdf0a8e51 | |||
| 2bb13b4677 | |||
| baa26e67ef | |||
| 9a5a509dd9 | |||
| cbcb98ef6a | |||
| bb864c6313 | |||
| 6d849eeb12 | |||
| ef752838b0 | |||
| 73d4a1ff4b | |||
| 8c62f21aa6 | |||
| c40ebfc21f | |||
| c365ea9f57 | |||
| 12d66777cc | |||
| 9ac3d0d65d | |||
| 9fd212652e | |||
| 790a1cf12a | |||
| 3ecf2977a8 | |||
| aeddf6b461 | |||
| ce0d8b9dab | |||
| 3c00e7a143 | |||
| ef1bfdd60f | |||
| e48d92e82e | |||
| 110510997f | |||
| b52695845e | |||
| f30c9c6d3b | |||
| ff5403eac6 | |||
| f9226d92be | |||
| a0ea5d0e9e | |||
| ce6f11d200 | |||
| 10b3001dba | |||
| e2de1d76ea | |||
| 77cc141a82 | |||
| 526b4d8ecd | |||
| 149db621ec | |||
| 2e1bb7311c | |||
| dae65fd2c2 | |||
| 9aafb2ee47 | |||
| 6bc91bd02e | |||
| 8ef7344101 | |||
| 40da1b0afe | |||
| c65def90f3 | |||
| ddeaf76422 | |||
| f23b66dec2 | |||
| a26b294817 | |||
| 66018840da | |||
| cea2144f34 | |||
| 7f5be93c1d | |||
| 85b838b302 | |||
| 27f97ba92a | |||
| 14269eba98 | |||
| d5c9bc9f0a | |||
| b0fed3edfc | |||
| 7296d054a2 | |||
| d57c7d352d | |||
| 3fd2927ea3 | |||
| b745074160 | |||
| 70ee810133 | |||
| 68fea9e79b | |||
| f82bf91aa8 | |||
| dde9edcc0c | |||
| 66c78e459e | |||
| de54102303 | |||
| 7c7d2d8a84 | |||
| 834f989ed4 | |||
| b658ee6e04 | |||
| 1a60280ea0 | |||
| 991cb7d272 | |||
| 463991cfb2 | |||
| 06f10b5fdc | |||
| d275d012c6 | |||
| c5d1ea3e21 | |||
| 0022b92404 | |||
| ef61221241 | |||
| 5a1831db98 | |||
| a643f8b0db | |||
| 601712fd0a | |||
| e769f831c7 | |||
| dcd952671f | |||
| 06564df038 | |||
| 2f037f30d5 | |||
| efedab186d | |||
| f49cae5116 | |||
| 2b620ccf2e | |||
| a1b7a4da56 | |||
| 61b0e49fed | |||
| f60dc371db | |||
| 0a3433b8ac | |||
| 31bce54abb | |||
| 5db1530717 | |||
| c32929fd11 | |||
| 3e4c2b056c | |||
| e79e9d7d23 | |||
| d175b93072 | |||
| ed254687d2 | |||
| c0392f7074 | |||
| f437712af7 | |||
| 6d1ea643e9 | |||
| 9e84cfcd46 | |||
| 897695d29f | |||
| 1dcc2873d2 | |||
| 42cf738a31 | |||
| e4646789af | |||
| e6c3aabd45 | |||
| 6789d1fab4 | |||
| 7a733f00a2 | |||
| dd55888f0e | |||
| 0327df22eb | |||
| e544f5e9d0 | |||
| 0fad4f44a4 | |||
| 1240dd6f26 | |||
| d6be947177 | |||
| 3cfbdce9f2 | |||
| 1ee471ff57 | |||
| 25ccecf8e3 | |||
| 9e991bfa3e | |||
| 221efd0193 | |||
| 976b9bf65f | |||
| ae5783e383 | |||
| 30224af042 | |||
| 8ff7c15cd8 | |||
| f3205994ea | |||
| ec8cc48a4d | |||
| 5d75c578b9 | |||
| cd411c2eea |
.github/workflows/build-image.yaml (vendored, new file, 38 lines)
@@ -0,0 +1,38 @@
name: Build Image

on:
  workflow_dispatch:
    inputs:
      release_tag:
        description: 'Tag for the images'
        required: true

env:
  REGISTRY: registry.cn-hongkong.aliyuncs.com
  NAMESPACE: chatwithpaper
  IMAGE: academic
  TAG: ${{ github.event.inputs.release_tag || github.event.client_payload.release_tag }}

jobs:
  build:
    runs-on: ubuntu-latest
    environment: production
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Login to Registry
        uses: docker/login-action@v2.1.0
        with:
          registry: "${{ env.REGISTRY }}"
          username: "${{ secrets.ACR_USER }}"
          password: "${{ secrets.ACR_PASSWORD }}"

      - name: Build and push image
        uses: docker/build-push-action@v4
        with:
          context: .
          file: docs/Dockerfile+NoLocal+Latex
          tags: ${{ env.REGISTRY }}/${{ env.NAMESPACE }}/${{ env.IMAGE }}:${{ env.TAG }}
          push: true
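The new workflow above only runs on workflow_dispatch, so a build has to be started by hand with a release_tag input. As a rough sketch (not part of this diff), it could be triggered through the GitHub REST API as below; the owner, repository, branch, and tag values are placeholders.

```python
# Illustrative only: trigger the manual "Build Image" workflow via the GitHub REST API.
# OWNER/REPO, the branch name, and the tag value are placeholders, not taken from the diff.
import os
import requests

OWNER, REPO = "OWNER", "REPO"          # placeholder repository
WORKFLOW_FILE = "build-image.yaml"     # matches .github/workflows/build-image.yaml
url = f"https://api.github.com/repos/{OWNER}/{REPO}/actions/workflows/{WORKFLOW_FILE}/dispatches"

resp = requests.post(
    url,
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",  # token with workflow scope
    },
    json={
        "ref": "main",                             # branch to run the workflow on (placeholder)
        "inputs": {"release_tag": "v0.0.1-test"},  # becomes ${{ github.event.inputs.release_tag }}
    },
)
resp.raise_for_status()  # GitHub returns 204 No Content on success
```

The TAG fallback to github.event.client_payload.release_tag suggests the same build can also be reached from a repository_dispatch event.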
.github/workflows/build-with-chatglm.yml (vendored, deleted, 44 lines)
@@ -1,44 +0,0 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: Create and publish a Docker image for ChatGLM support

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_chatglm_moss

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+ChatGLM+Moss
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
.github/workflows/build-with-jittorllms.yml (vendored, deleted, 44 lines)
@@ -1,44 +0,0 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: Create and publish a Docker image for ChatGLM support

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_jittorllms

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+JittorLLMs
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
.github/workflows/build-without-local-llms.yml (vendored, deleted, 44 lines)
@@ -1,44 +0,0 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: Create and publish a Docker image

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_nolocal

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+NoLocal
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
README.md (12 changed lines)
@@ -1,15 +1,3 @@
---
title: ChatImprovement
emoji: 😻
colorFrom: blue
colorTo: blue
sdk: gradio
sdk_version: 3.32.0
app_file: app.py
pinned: false
---

# ChatGPT 学术优化
> **Note**
>
> 2023.5.27 对Gradio依赖进行了调整,Fork并解决了官方Gradio的若干Bugs。请及时**更新代码**并重新更新pip依赖。安装依赖时,请严格选择`requirements.txt`中**指定的版本**:
@@ -45,9 +45,10 @@ WEB_PORT = -1
# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
MAX_RETRY = 2

# OpenAI模型选择是(gpt4现在只对申请成功的人开放)
LLM_MODEL = "gpt-3.5-turbo" # 可选 "chatglm"
AVAIL_LLM_MODELS = ["newbing-free", "gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 同时它必须被包含在AVAIL_LLM_MODELS切换列表中 )
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo"]
# P.S. 其他可用的模型还包括 ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]

# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
@@ -42,6 +42,7 @@ def get_core_functions():
        "中译英": {
            "Prefix": r"Please translate following sentence to English:" + "\n\n",
            "Suffix": r"",
            "Visible": False,
        },
        "学术中英互译": {
            "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
@@ -63,6 +64,7 @@ def get_core_functions():
            "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +
                      r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n",
            "Suffix": r"",
            "Visible": False,
        },
        "解释代码": {
            "Prefix": r"请解释以下代码:" + "\n```\n",
@@ -26,6 +26,7 @@ def get_crazy_functions():
    from crazy_functions.对话历史存档 import 删除所有本地对话历史记录

    from crazy_functions.批量Markdown翻译 import Markdown英译中

    function_plugins = {
        "解析整个Python项目": {
            "Color": "stop",    # 按钮颜色
@@ -47,10 +48,10 @@ def get_crazy_functions():
            "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
            "ArgsReminder": "若输入0,则不解析notebook中的Markdown块", # 高级参数输入区的显示提示
        },
        "批量总结Word文档": {
            "Color": "stop",
            "Function": HotReload(总结word文档)
        },
        # "批量总结Word文档": {
        #     "Color": "stop",
        #     "Function": HotReload(总结word文档)
        # },
        "解析整个C++项目头文件": {
            "Color": "stop",    # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
@@ -108,10 +109,10 @@ def get_crazy_functions():
        "保存当前的对话": {
            "Function": HotReload(对话历史存档)
        },
        "[多线程Demo] 解析此项目本身(源码自译解)": {
            "AsButton": False, # 加入下拉菜单中
            "Function": HotReload(解析项目本身)
        },
        # "[多线程Demo] 解析此项目本身(源码自译解)": {
        #     "AsButton": False, # 加入下拉菜单中
        #     "Function": HotReload(解析项目本身)
        # },
        # "[老旧的Demo] 把本项目源代码切换成全英文": {
        #     # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
        #     "AsButton": False, # 加入下拉菜单中
@@ -137,15 +138,15 @@ def get_crazy_functions():
        from crazy_functions.批量Markdown翻译 import Markdown中译英

        function_plugins.update({
            "批量翻译PDF文档(多线程)": {
            "本地PDF全文翻译": {
                "Color": "stop",
                "AsButton": True,  # 加入下拉菜单中
                "Function": HotReload(批量翻译PDF文档)
            },
            "询问多个GPT模型": {
                "Color": "stop",    # 按钮颜色
                "Function": HotReload(同时问询)
            },
            # "询问多个GPT模型": {
            #     "Color": "stop",    # 按钮颜色
            #     "Function": HotReload(同时问询)
            # },
            "[测试功能] 批量总结PDF文档": {
                "Color": "stop",
                "AsButton": False,  # 加入下拉菜单中
@@ -222,54 +223,57 @@ def get_crazy_functions():
        })
    except:
        print('Load function plugin failed')
    from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
    function_plugins.update({
        "ArXiv Latex一键翻译(输入区给定arXiv ID)": {
            "Color": "stop",
            "AsButton": True,
            "AdvancedArgs": True,
            "ArgsReminder":
                "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
                "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
            "Function": HotReload(Latex翻译中文并重新编译PDF)
        }
    })
    # try:
    #     from crazy_functions.联网的ChatGPT import 连接网络回答问题
    #     function_plugins.update({
    #         "连接网络回答问题(先输入问题,再点击按钮,需要访问谷歌)": {
    #             "Color": "stop",
    #             "AsButton": False,    # 加入下拉菜单中
    #             "Function": HotReload(连接网络回答问题)
    #         }
    #     })
    # except:
    #     print('Load function plugin failed')

    try:
        from crazy_functions.联网的ChatGPT import 连接网络回答问题
        function_plugins.update({
            "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
                "Color": "stop",
                "AsButton": False,    # 加入下拉菜单中
                "Function": HotReload(连接网络回答问题)
            }
        })
        from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
        function_plugins.update({
            "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
                "Color": "stop",
                "AsButton": False,    # 加入下拉菜单中
                "Function": HotReload(连接bing搜索回答问题)
            }
        })
    except:
        print('Load function plugin failed')
    # try:
    #     from crazy_functions.解析项目源代码 import 解析任意code项目
    #     function_plugins.update({
    #         "解析项目源代码(手动指定和筛选源代码文件类型)": {
    #             "Color": "stop",
    #             "AsButton": False,
    #             "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
    #             "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
    #             "Function": HotReload(解析任意code项目)
    #         },
    #     })
    # except:
    #     print('Load function plugin failed')

    try:
        from crazy_functions.解析项目源代码 import 解析任意code项目
        function_plugins.update({
            "解析项目源代码(手动指定和筛选源代码文件类型)": {
                "Color": "stop",
                "AsButton": False,
                "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
                "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
                "Function": HotReload(解析任意code项目)
            },
        })
    except:
        print('Load function plugin failed')

    try:
        from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
        function_plugins.update({
            "询问多个GPT模型(手动指定询问哪些模型)": {
                "Color": "stop",
                "AsButton": False,
                "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
                "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
                "Function": HotReload(同时问询_指定模型)
            },
        })
    except:
        print('Load function plugin failed')
    # try:
    #     from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
    #     function_plugins.update({
    #         "询问多个GPT模型(手动指定询问哪些模型)": {
    #             "Color": "stop",
    #             "AsButton": False,
    #             "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
    #             "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
    #             "Function": HotReload(同时问询_指定模型)
    #         },
    #     })
    # except:
    #     print('Load function plugin failed')

    try:
        from crazy_functions.图片生成 import 图片生成
@@ -364,29 +368,18 @@ def get_crazy_functions():
                "Function": HotReload(Latex英文纠错加PDF对比)
            }
        })
        from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
        function_plugins.update({
            "Arixv翻译(输入arxivID)[需Latex]": {
                "Color": "stop",
                "AsButton": False,
                "AdvancedArgs": True,
                "ArgsReminder":
                    "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
                    "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
                "Function": HotReload(Latex翻译中文并重新编译PDF)
            }
        })
        function_plugins.update({
            "本地论文翻译(上传Latex压缩包)[需Latex]": {
                "Color": "stop",
                "AsButton": False,
                "AdvancedArgs": True,
                "ArgsReminder":
                    "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
                    "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
                "Function": HotReload(Latex翻译中文并重新编译PDF)
            }
        })

        # function_plugins.update({
        #     "本地论文翻译(上传Latex压缩包) [需Latex]": {
        #         "Color": "stop",
        #         "AsButton": False,
        #         "AdvancedArgs": True,
        #         "ArgsReminder":
        #             "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
        #             "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
        #         "Function": HotReload(Latex翻译中文并重新编译PDF)
        #     }
        # })
    except:
        print('Load function plugin failed')
@@ -404,4 +397,18 @@ def get_crazy_functions():
    # except:
    #     print('Load function plugin failed')

    # try:
    #     from crazy_functions.虚空终端 import 终端
    #     function_plugins.update({
    #         "超级终端": {
    #             "Color": "stop",
    #             "AsButton": False,
    #             # "AdvancedArgs": True,
    #             # "ArgsReminder": "",
    #             "Function": HotReload(终端)
    #         }
    #     })
    # except:
    #     print('Load function plugin failed')

    return function_plugins
@@ -3,7 +3,9 @@ from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip
from functools import partial
import glob, os, requests, time
pj = os.path.join
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
# ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
# ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
ARXIV_CACHE_DIR = os.getenv("Arxiv_Cache")

# =================================== 工具函数 ===============================================
专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
@@ -190,9 +192,9 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo


    # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
    if not os.path.exists(project_folder + '/merge_proofread_en.tex'):
    if not os.path.exists(project_folder + '/merge_proofread.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                      chatbot, history, system_prompt, mode='proofread_en', switch_prompt=_switch_prompt_)
                                      chatbot, history, system_prompt, mode='proofread_latex', switch_prompt=switch_prompt)


    # <-------------- compile PDF ------------->
@@ -193,8 +193,9 @@ def test_Latex():
    # txt = r"https://arxiv.org/abs/2212.10156"
    # txt = r"https://arxiv.org/abs/2211.11559"
    # txt = r"https://arxiv.org/abs/2303.08774"
    txt = r"https://arxiv.org/abs/2303.12712"
    # txt = r"https://arxiv.org/abs/2303.12712"
    # txt = r"C:\Users\fuqingxu\arxiv_cache\2303.12712\workfolder"
    txt = r"C:\Users\fuqingxu\Desktop\9"


    for cookies, cb, hist, msg in (Latex翻译中文并重新编译PDF)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
@@ -449,9 +449,10 @@ class LatexPaperSplit():
    """
    def __init__(self) -> None:
        self.nodes = None
        self.msg = "*{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \
        self.msg = "{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \
            "版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \
            "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
            "项目Github地址: \\url{https://github.com/binary-husky/gpt_academic/}。" + \
            "项目在线体验地址: \\url{https://chatpaper.org}。"
        # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者)
        self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"
@@ -1,5 +1,5 @@
from toolbox import CatchException, report_execption, write_results_to_file
from toolbox import update_ui
from toolbox import update_ui, promote_file_to_downloadzone
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from .crazy_utils import read_and_clean_pdf_text
@@ -147,23 +147,14 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
        print('writing html result failed:', trimmed_format_exc())

    # 准备文件的下载
    import shutil
    for pdf_path in generated_conclusion_files:
        # 重命名文件
        rename_file = f'./gpt_log/翻译-{os.path.basename(pdf_path)}'
        if os.path.exists(rename_file):
            os.remove(rename_file)
        shutil.copyfile(pdf_path, rename_file)
        if os.path.exists(pdf_path):
            os.remove(pdf_path)
        rename_file = f'翻译-{os.path.basename(pdf_path)}'
        promote_file_to_downloadzone(pdf_path, rename_file=rename_file, chatbot=chatbot)
    for html_path in generated_html_files:
        # 重命名文件
        rename_file = f'./gpt_log/翻译-{os.path.basename(html_path)}'
        if os.path.exists(rename_file):
            os.remove(rename_file)
        shutil.copyfile(html_path, rename_file)
        if os.path.exists(html_path):
            os.remove(html_path)
        rename_file = f'翻译-{os.path.basename(html_path)}'
        promote_file_to_downloadzone(html_path, rename_file=rename_file, chatbot=chatbot)
    chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files)))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -13,11 +13,11 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
        web_port 当前软件运行的端口号
    """
    history = []    # 清空历史,以免输入溢出
    chatbot.append((txt, "正在同时咨询gpt-3.5和gpt-4……"))
    chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新

    # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
    llm_kwargs['llm_model'] = 'gpt-3.5-turbo&gpt-4' # 支持任意数量的llm接口,用&符号分隔
    llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=txt, inputs_show_user=txt,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
@@ -104,7 +104,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
        meta_paper_info_list = meta_paper_info_list[batchsize:]

    chatbot.append(["状态?",
                    "已经全部完成,您可以试试让AI写一个Related Works,例如您可以继续输入Write an academic \"Related Works\" section about \"你搜索的研究领域\" for me."])
                    "已经全部完成,您可以试试让AI写一个Related Works,例如您可以继续输入Write a \"Related Works\" section about \"你搜索的研究领域\" for me."])
    msg = '正常'
    yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
    res = write_results_to_file(history)
docs/gradio-3.32.2-py3-none-any.whl (BIN, new file; binary file not shown)

@@ -96,15 +96,6 @@

● 部署名(不是模型名)

# 修改 config.py

```
AZURE_ENDPOINT = "填入终结点"
AZURE_API_KEY = "填入azure openai api的密钥"
AZURE_API_VERSION = "2023-05-15" # 默认使用 2023-05-15 版本,无需修改
AZURE_ENGINE = "填入部署名"

```
# API的使用

接下来就是具体怎么使用API了,还是可以参考官方文档:[快速入门 - 开始通过 Azure OpenAI 服务使用 ChatGPT 和 GPT-4 - Azure OpenAI Service | Microsoft Learn](https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/chatgpt-quickstart?pivots=programming-language-python)
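For orientation only (not part of this diff): the four removed settings above correspond to the usual Azure OpenAI call pattern of the pre-1.0 openai Python SDK. A minimal sketch, with placeholder endpoint, key, and deployment name:

```python
# Illustrative sketch, not project code: how AZURE_ENDPOINT / AZURE_API_KEY /
# AZURE_API_VERSION / AZURE_ENGINE are typically consumed with the openai SDK (<1.0).
import openai

openai.api_type = "azure"
openai.api_base = "https://YOUR-RESOURCE.openai.azure.com/"  # AZURE_ENDPOINT (placeholder)
openai.api_key = "YOUR-AZURE-OPENAI-KEY"                     # AZURE_API_KEY (placeholder)
openai.api_version = "2023-05-15"                            # AZURE_API_VERSION

response = openai.ChatCompletion.create(
    engine="YOUR-DEPLOYMENT-NAME",  # AZURE_ENGINE: deployment name, not the model name
    messages=[{"role": "user", "content": "Hello from Azure OpenAI"}],
)
print(response["choices"][0]["message"]["content"])
```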
@@ -1,10 +1,9 @@
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
from pathlib import Path

def main():
    import subprocess, sys
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'gradio-stable-fork'])
    import gradio as gr
    if gr.__version__ not in ['3.28.3','3.32.3']: assert False, "请用 pip install -r requirements.txt 安装依赖"
    if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖,详情信息见requirements.txt"
    from request_llm.bridge_all import predict
    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
    # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
@@ -17,7 +16,7 @@ def main():

    from check_proxy import get_current_version
    initial_prompt = "Serve me as a writing and programming assistant."
    title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
    title_html = f"<h1 align=\"center\">ChatGPT 学术优化 网页测试版 {get_current_version()}</h1>"
    description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""

    # 问询记录, python 版本建议3.9+(越新越好)
@@ -54,9 +53,22 @@ def main():
        CHATBOT_HEIGHT /= 2

    cancel_handles = []
    # Read your Baidu statistics code from the file
    baidu_stats_code = Path('./sites/baidu_stats.html').read_text()
    with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
        # Insert your Baidu statistics code here
        gradio_original_template_fn = gr.routes.templates.TemplateResponse

        def gradio_new_template_fn(*args, **kwargs):
            res = gradio_original_template_fn(*args, **kwargs)
            res.body = res.body.replace(b'</html>', f'{baidu_stats_code}</html>'.encode("utf8"))
            res.init_headers()
            return res

        gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template

        # Insert Title
        gr.HTML(title_html)
        gr.HTML('''<center><a href="https://huggingface.co/spaces/qingxu98/gpt-academic?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!<font color="#FF00FF">使用时,先在输入框填入API-KEY然后回车。</font><br/>切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!<br/>支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。</center>''')
        cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
        with gr_L1():
            with gr_L2(scale=2):
@@ -66,7 +78,7 @@ def main():
            with gr_L2(scale=1):
                with gr.Accordion("输入区", open=True) as area_input_primary:
                    with gr.Row():
                        txt = gr.Textbox(show_label=False, lines=2, placeholder="输入问题或API密钥,输入多个密钥时,用英文逗号间隔。支持OpenAI密钥和API2D密钥共存。").style(container=False)
                        txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
                    with gr.Row():
                        submitBtn = gr.Button("提交", variant="primary")
                    with gr.Row():
@@ -74,7 +86,13 @@ def main():
                        stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
                        clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm")
                    with gr.Row():
                        status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
                        status = gr.Markdown(f"""Tips: 1. 按Enter提交, 按Shift+Enter换行;2. 当前模型: {LLM_MODEL} \n {proxy_info}.
                        3. 请注意隐私保护和遵守法律法规;
                        4. 请勿使用本服务进行违法犯罪活动;
                        5. 我和qingxu都希望能够为大家提供一个好的**学术工具**,希望大家不要攻击和滥用本服务;
                        6. 本服务还存在各种bug,如果发现bug,欢迎加群反馈或者发issue告诉我们;
                        7. 希望大家能结合ChatPaper的速读,找到需要精读的,再用本工具的全文翻译,实现快速知识摄取。
                        """)
                with gr.Accordion("基础功能区", open=True) as area_basic_fn:
                    with gr.Row():
                        for k in functional:
@@ -170,6 +188,7 @@ def main():
                ret.update({plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")})
            return ret
        dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] )

        def on_md_dropdown_changed(k):
            return {chatbot: gr.update(label="当前模型:"+k)}
        md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] )
@@ -183,6 +202,7 @@ def main():
        # 终止按钮的回调函数注册
        stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
        stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
        demo.load(on_dropdown_changed, inputs=gr.State("ArXiv Latex一键翻译(输入区给定arXiv ID)"), outputs=[switchy_bt, plugin_advanced_arg])

    # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
    def auto_opentab_delay():
@@ -200,7 +220,10 @@ def main():
        threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()

    auto_opentab_delay()
    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png", blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
        server_name="0.0.0.0", server_port=PORT,
        favicon_path="docs/logo.png", auth=AUTHENTICATION,
        blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])

    # 如果需要在二级路径下运行
    # CUSTOM_PATH, = get_conf('CUSTOM_PATH')
@@ -152,7 +152,7 @@ model_info = {
        "token_cnt": get_token_num_gpt4,
    },

    # 将 chatglm 直接对齐到 chatglm2
    # chatglm
    "chatglm": {
        "fn_with_ui": chatglm_ui,
        "fn_without_ui": chatglm_noui,
@@ -161,15 +161,6 @@ model_info = {
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    "chatglm2": {
        "fn_with_ui": chatglm_ui,
        "fn_without_ui": chatglm_noui,
        "endpoint": None,
        "max_token": 1024,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },

    # newbing
    "newbing": {
        "fn_with_ui": newbing_ui,
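A registry shaped like model_info is normally consumed by looking up the selected model name and calling its registered handler. The sketch below is only illustrative; the function name and signature are assumptions, not the project's actual call sites.

```python
# Illustrative sketch of how a registry like model_info is typically consumed.
# The helper name and fallback behaviour are assumptions, not project code.
from typing import Any, Callable, Dict

def pick_handler(model_info: Dict[str, Dict[str, Any]],
                 llm_model: str,
                 with_ui: bool = True) -> Callable:
    """Look up the selected model and return its UI or non-UI handler."""
    if llm_model not in model_info:
        raise KeyError(f"Unknown model '{llm_model}', known: {sorted(model_info)}")
    entry = model_info[llm_model]
    return entry["fn_with_ui"] if with_ui else entry["fn_without_ui"]
```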
@@ -40,12 +40,12 @@ class GetGLMHandle(Process):
        while True:
            try:
                if self.chatglm_model is None:
                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
                    device, = get_conf('LOCAL_MODEL_DEVICE')
                    if device=='cpu':
                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).float()
                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
                    else:
                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
                    self.chatglm_model = self.chatglm_model.eval()
                    break
                else:
@@ -1,3 +1,4 @@
./docs/gradio-3.32.2-py3-none-any.whl
tiktoken>=0.3.3
requests[socks]
transformers
@@ -14,4 +15,4 @@ pymupdf
openai
numpy
arxiv
rich
rich
sites/baidu_stats.html (new file, 10 lines)
@@ -0,0 +1,10 @@
<!-- baidu_stats.html -->
<script>
var _hmt = _hmt || [];
(function() {
  var hm = document.createElement("script");
  hm.src = "https://hm.baidu.com/hm.js?208673d55832a94b9bbe10b1f4e70c09";
  var s = document.getElementsByTagName("script")[0];
  s.parentNode.insertBefore(hm, s);
})();
</script>
@@ -842,4 +842,4 @@ def objload(file='objdump.tmp'):
        return
    with open(file, 'rb') as f:
        return pickle.load(f)