Compare commits
28 commits: chatpaper-... → huggingfac...
| SHA1 |
|---|
| 96c1852abc |
| cd145c0794 |
| 7a4d4ad956 |
| 9f9848c6e9 |
| 94425c49fd |
| e874a16050 |
| c28388c5fe |
| b4a56d391b |
| 7075092f86 |
| 1086ff8092 |
| 3a22446b47 |
| 7842cf03cc |
| 54f55c32f2 |
| 94318ff0a2 |
| 5be6b83762 |
| 6f18d1716e |
| 90944bd744 |
| 752937cb70 |
| c584cbac5b |
| 309d12b404 |
| 52ea0acd61 |
| 9f5e3e0fd5 |
| 315e78e5d9 |
| b6b4ba684a |
| 2281a5ca7f |
| 49558686f2 |
| b050ccedb5 |
| ae56cab6f4 |
.github/workflows/build-image.yaml (vendored, deleted): 38 lines
@@ -1,38 +0,0 @@
-name: Build Image
-
-on:
-  workflow_dispatch:
-    inputs:
-      release_tag:
-        description: 'Tag for the images'
-        required: true
-
-env:
-  REGISTRY: registry.cn-hongkong.aliyuncs.com
-  NAMESPACE: chatwithpaper
-  IMAGE: academic
-  TAG: ${{ github.event.inputs.release_tag || github.event.client_payload.release_tag }}
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    environment: production
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-
-      - name: Login to Registry
-        uses: docker/login-action@v2.1.0
-        with:
-          registry: "${{ env.REGISTRY }}"
-          username: "${{ secrets.ACR_USER }}"
-          password: "${{ secrets.ACR_PASSWORD }}"
-
-      - name: Build and push image
-        uses: docker/build-push-action@v4
-        with:
-          context: .
-          file: docs/Dockerfile+NoLocal+Latex
-          tags: ${{ env.REGISTRY }}/${{ env.NAMESPACE }}/${{ env.IMAGE }}:${{ env.TAG }}
-          push: true
.github/workflows/build-with-chatglm.yml (vendored, new file): 44 lines
@@ -0,0 +1,44 @@
+# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
+name: Create and publish a Docker image for ChatGLM support
+
+on:
+  push:
+    branches:
+      - 'master'
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}_chatglm_moss
+
+jobs:
+  build-and-push-image:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Log in to the Container registry
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          push: true
+          file: docs/GithubAction+ChatGLM+Moss
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
.github/workflows/build-with-jittorllms.yml (vendored, new file): 44 lines
@@ -0,0 +1,44 @@
+# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
+name: Create and publish a Docker image for ChatGLM support
+
+on:
+  push:
+    branches:
+      - 'master'
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}_jittorllms
+
+jobs:
+  build-and-push-image:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Log in to the Container registry
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          push: true
+          file: docs/GithubAction+JittorLLMs
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
.github/workflows/build-without-local-llms.yml (vendored, new file): 44 lines
@@ -0,0 +1,44 @@
+# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
+name: Create and publish a Docker image
+
+on:
+  push:
+    branches:
+      - 'master'
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}_nolocal
+
+jobs:
+  build-and-push-image:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Log in to the Container registry
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          push: true
+          file: docs/GithubAction+NoLocal
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
README.md: 12 changed lines
@@ -1,3 +1,15 @@
+---
+title: ChatImprovement
+emoji: 😻
+colorFrom: blue
+colorTo: blue
+sdk: gradio
+sdk_version: 3.32.0
+app_file: app.py
+pinned: false
+---
+
+# ChatGPT 学术优化
 > **Note**
 >
 > 2023.5.27 对Gradio依赖进行了调整,Fork并解决了官方Gradio的若干Bugs。请及时**更新代码**并重新更新pip依赖。安装依赖时,请严格选择`requirements.txt`中**指定的版本**:
@@ -1,9 +1,10 @@
 import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
-from pathlib import Path
 
 def main():
+    import subprocess, sys
+    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'gradio-stable-fork'])
     import gradio as gr
-    if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "需要特殊依赖,请务必用 pip install -r requirements.txt 指令安装依赖,详情信息见requirements.txt"
+    if gr.__version__ not in ['3.28.3','3.32.3']: assert False, "请用 pip install -r requirements.txt 安装依赖"
     from request_llm.bridge_all import predict
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
     # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
@@ -16,7 +17,7 @@ def main():
 
     from check_proxy import get_current_version
     initial_prompt = "Serve me as a writing and programming assistant."
-    title_html = f"<h1 align=\"center\">ChatGPT 学术优化 网页测试版 {get_current_version()}</h1>"
+    title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
     description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
 
     # 问询记录, python 版本建议3.9+(越新越好)
@@ -53,22 +54,9 @@ def main():
         CHATBOT_HEIGHT /= 2
 
     cancel_handles = []
-    # Read your Baidu statistics code from the file
-    baidu_stats_code = Path('./sites/baidu_stats.html').read_text()
     with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
-        # Insert your Baidu statistics code here
-        gradio_original_template_fn = gr.routes.templates.TemplateResponse
-
-        def gradio_new_template_fn(*args, **kwargs):
-            res = gradio_original_template_fn(*args, **kwargs)
-            res.body = res.body.replace(b'</html>', f'{baidu_stats_code}</html>'.encode("utf8"))
-            res.init_headers()
-            return res
-
-        gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
-
-        # Insert Title
         gr.HTML(title_html)
+        gr.HTML('''<center><a href="https://huggingface.co/spaces/qingxu98/gpt-academic?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!<font color="#FF00FF">使用时,先在输入框填入API-KEY然后回车。</font><br/>切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!<br/>支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。</center>''')
         cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
         with gr_L1():
             with gr_L2(scale=2):
@@ -78,7 +66,7 @@ def main():
             with gr_L2(scale=1):
                 with gr.Accordion("输入区", open=True) as area_input_primary:
                     with gr.Row():
-                        txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
+                        txt = gr.Textbox(show_label=False, lines=2, placeholder="输入问题或API密钥,输入多个密钥时,用英文逗号间隔。支持OpenAI密钥和API2D密钥共存。").style(container=False)
                     with gr.Row():
                         submitBtn = gr.Button("提交", variant="primary")
                     with gr.Row():
@@ -86,13 +74,7 @@ def main():
                         stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
                         clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm")
                     with gr.Row():
-                        status = gr.Markdown(f"""Tips: 1. 按Enter提交, 按Shift+Enter换行;2. 当前模型: {LLM_MODEL} \n {proxy_info}.
-                        3. 请注意隐私保护和遵守法律法规;
-                        4. 请勿使用本服务进行违法犯罪活动;
-                        5. 我和qingxu都希望能够为大家提供一个好的**学术工具**,希望大家不要攻击和滥用本服务;
-                        6. 本服务还存在各种bug,如果发现bug,欢迎加群反馈或者发issue告诉我们;
-                        7. 希望大家能结合ChatPaper的速读,找到需要精读的,再用本工具的全文翻译,实现快速知识摄取。
-                        """)
+                        status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
                 with gr.Accordion("基础功能区", open=True) as area_basic_fn:
                     with gr.Row():
                         for k in functional:
@@ -188,7 +170,6 @@ def main():
                 ret.update({plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")})
                 return ret
             dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] )
-
             def on_md_dropdown_changed(k):
                 return {chatbot: gr.update(label="当前模型:"+k)}
             md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] )
@@ -202,7 +183,6 @@ def main():
         # 终止按钮的回调函数注册
         stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
         stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
-        demo.load(on_dropdown_changed, inputs=gr.State("ArXiv Latex一键翻译(输入区给定arXiv ID)"), outputs=[switchy_bt, plugin_advanced_arg])
 
     # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
     def auto_opentab_delay():
@@ -220,10 +200,7 @@ def main():
         threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
 
     auto_opentab_delay()
-    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
-        server_name="0.0.0.0", server_port=PORT,
-        favicon_path="docs/logo.png", auth=AUTHENTICATION,
-        blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
+    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png", blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
 
     # 如果需要在二级路径下运行
     # CUSTOM_PATH, = get_conf('CUSTOM_PATH')
@@ -45,10 +45,9 @@ WEB_PORT = -1
 # 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
 MAX_RETRY = 2
 
-# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 同时它必须被包含在AVAIL_LLM_MODELS切换列表中 )
-LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo"]
-# P.S. 其他可用的模型还包括 ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+# OpenAI模型选择是(gpt4现在只对申请成功的人开放)
+LLM_MODEL = "gpt-3.5-turbo" # 可选 "chatglm"
+AVAIL_LLM_MODELS = ["newbing-free", "gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]
 
 # 本地LLM模型如ChatGLM的执行方式 CPU/GPU
 LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
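The removed comment notes that LLM_MODEL must also be contained in AVAIL_LLM_MODELS, otherwise the default model cannot be selected in the UI. A minimal sketch of that constraint as a startup check; the helper name is illustrative and not part of the diff:

```python
# Illustrative only: a startup sanity check for the constraint described in the
# config comment (LLM_MODEL must be contained in AVAIL_LLM_MODELS).
LLM_MODEL = "gpt-3.5-turbo"
AVAIL_LLM_MODELS = ["newbing-free", "gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]

def check_default_model(default, available):
    # Fail fast if the default model would not appear in the model dropdown.
    if default not in available:
        raise ValueError(f"LLM_MODEL={default!r} must be one of AVAIL_LLM_MODELS={available}")

check_default_model(LLM_MODEL, AVAIL_LLM_MODELS)
```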
@@ -42,7 +42,6 @@ def get_core_functions():
         "中译英": {
             "Prefix": r"Please translate following sentence to English:" + "\n\n",
             "Suffix": r"",
-            "Visible": False,
         },
         "学术中英互译": {
             "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
@@ -64,7 +63,6 @@ def get_core_functions():
             "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +
                       r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n",
             "Suffix": r"",
-            "Visible": False,
         },
         "解释代码": {
             "Prefix": r"请解释以下代码:" + "\n```\n",
@@ -26,7 +26,6 @@ def get_crazy_functions():
     from crazy_functions.对话历史存档 import 删除所有本地对话历史记录
-
     from crazy_functions.批量Markdown翻译 import Markdown英译中
 
     function_plugins = {
         "解析整个Python项目": {
             "Color": "stop",    # 按钮颜色
@@ -48,10 +47,10 @@ def get_crazy_functions():
             "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
             "ArgsReminder": "若输入0,则不解析notebook中的Markdown块", # 高级参数输入区的显示提示
         },
-        # "批量总结Word文档": {
-        #     "Color": "stop",
-        #     "Function": HotReload(总结word文档)
-        # },
+        "批量总结Word文档": {
+            "Color": "stop",
+            "Function": HotReload(总结word文档)
+        },
         "解析整个C++项目头文件": {
             "Color": "stop",    # 按钮颜色
             "AsButton": False,  # 加入下拉菜单中
@@ -109,10 +108,10 @@ def get_crazy_functions():
         "保存当前的对话": {
             "Function": HotReload(对话历史存档)
         },
-        # "[多线程Demo] 解析此项目本身(源码自译解)": {
-        #     "AsButton": False, # 加入下拉菜单中
-        #     "Function": HotReload(解析项目本身)
-        # },
+        "[多线程Demo] 解析此项目本身(源码自译解)": {
+            "AsButton": False, # 加入下拉菜单中
+            "Function": HotReload(解析项目本身)
+        },
         # "[老旧的Demo] 把本项目源代码切换成全英文": {
         #     # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
         #     "AsButton": False, # 加入下拉菜单中
@@ -138,15 +137,15 @@ def get_crazy_functions():
     from crazy_functions.批量Markdown翻译 import Markdown中译英
 
     function_plugins.update({
-        "本地PDF全文翻译": {
+        "批量翻译PDF文档(多线程)": {
             "Color": "stop",
             "AsButton": True, # 加入下拉菜单中
             "Function": HotReload(批量翻译PDF文档)
         },
-        # "询问多个GPT模型": {
-        #     "Color": "stop", # 按钮颜色
-        #     "Function": HotReload(同时问询)
-        # },
+        "询问多个GPT模型": {
+            "Color": "stop", # 按钮颜色
+            "Function": HotReload(同时问询)
+        },
         "[测试功能] 批量总结PDF文档": {
             "Color": "stop",
             "AsButton": False, # 加入下拉菜单中
@@ -223,57 +222,54 @@ def get_crazy_functions():
         })
     except:
         print('Load function plugin failed')
-    from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
+    try:
+        from crazy_functions.联网的ChatGPT import 连接网络回答问题
         function_plugins.update({
-            "ArXiv Latex一键翻译(输入区给定arXiv ID)": {
+            "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
                 "Color": "stop",
-                "AsButton": True,
-                "AdvancedArgs": True,
-                "ArgsReminder":
-                    "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
-                    "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
-                "Function": HotReload(Latex翻译中文并重新编译PDF)
+                "AsButton": False, # 加入下拉菜单中
+                "Function": HotReload(连接网络回答问题)
             }
         })
-    # try:
-    #     from crazy_functions.联网的ChatGPT import 连接网络回答问题
-    #     function_plugins.update({
-    #         "连接网络回答问题(先输入问题,再点击按钮,需要访问谷歌)": {
-    #             "Color": "stop",
-    #             "AsButton": False, # 加入下拉菜单中
-    #             "Function": HotReload(连接网络回答问题)
-    #         }
-    #     })
-    # except:
-    #     print('Load function plugin failed')
+        from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
+        function_plugins.update({
+            "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
+                "Color": "stop",
+                "AsButton": False, # 加入下拉菜单中
+                "Function": HotReload(连接bing搜索回答问题)
+            }
+        })
+    except:
+        print('Load function plugin failed')
 
-    # try:
-    #     from crazy_functions.解析项目源代码 import 解析任意code项目
-    #     function_plugins.update({
-    #         "解析项目源代码(手动指定和筛选源代码文件类型)": {
-    #             "Color": "stop",
-    #             "AsButton": False,
-    #             "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
-    #             "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
-    #             "Function": HotReload(解析任意code项目)
-    #         },
-    #     })
-    # except:
-    #     print('Load function plugin failed')
+    try:
+        from crazy_functions.解析项目源代码 import 解析任意code项目
+        function_plugins.update({
+            "解析项目源代码(手动指定和筛选源代码文件类型)": {
+                "Color": "stop",
+                "AsButton": False,
+                "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
+                "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
+                "Function": HotReload(解析任意code项目)
+            },
+        })
+    except:
+        print('Load function plugin failed')
 
-    # try:
-    #     from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
-    #     function_plugins.update({
-    #         "询问多个GPT模型(手动指定询问哪些模型)": {
-    #             "Color": "stop",
-    #             "AsButton": False,
-    #             "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
-    #             "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
-    #             "Function": HotReload(同时问询_指定模型)
-    #         },
-    #     })
-    # except:
-    #     print('Load function plugin failed')
+    try:
+        from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
+        function_plugins.update({
+            "询问多个GPT模型(手动指定询问哪些模型)": {
+                "Color": "stop",
+                "AsButton": False,
+                "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
+                "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
+                "Function": HotReload(同时问询_指定模型)
+            },
+        })
+    except:
+        print('Load function plugin failed')
 
     try:
         from crazy_functions.图片生成 import 图片生成
@@ -368,18 +364,29 @@ def get_crazy_functions():
                 "Function": HotReload(Latex英文纠错加PDF对比)
             }
         })
-        # function_plugins.update({
-        #     "本地论文翻译(上传Latex压缩包) [需Latex]": {
-        #         "Color": "stop",
-        #         "AsButton": False,
-        #         "AdvancedArgs": True,
-        #         "ArgsReminder":
-        #             "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
-        #             "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
-        #         "Function": HotReload(Latex翻译中文并重新编译PDF)
-        #     }
-        # })
+        from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
+        function_plugins.update({
+            "Arixv翻译(输入arxivID)[需Latex]": {
+                "Color": "stop",
+                "AsButton": False,
+                "AdvancedArgs": True,
+                "ArgsReminder":
+                    "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
+                    "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
+                "Function": HotReload(Latex翻译中文并重新编译PDF)
+            }
+        })
+        function_plugins.update({
+            "本地论文翻译(上传Latex压缩包)[需Latex]": {
+                "Color": "stop",
+                "AsButton": False,
+                "AdvancedArgs": True,
+                "ArgsReminder":
+                    "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
+                    "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
+                "Function": HotReload(Latex翻译中文并重新编译PDF)
+            }
+        })
     except:
         print('Load function plugin failed')
 
@@ -397,18 +404,4 @@ def get_crazy_functions():
     # except:
     #     print('Load function plugin failed')
 
-    # try:
-    #     from crazy_functions.虚空终端 import 终端
-    #     function_plugins.update({
-    #         "超级终端": {
-    #             "Color": "stop",
-    #             "AsButton": False,
-    #             # "AdvancedArgs": True,
-    #             # "ArgsReminder": "",
-    #             "Function": HotReload(终端)
-    #         }
-    #     })
-    # except:
-    #     print('Load function plugin failed')
-
     return function_plugins
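The hunks above enable plugins by adding entries to the function_plugins dict, each wrapped in HotReload so edited plugin code takes effect without restarting the app. A minimal, self-contained sketch of that registration pattern; HotReload and the demo plugin here are stand-ins, not the project's real implementations:

```python
# Illustrative sketch of the plugin-registry pattern shown in the diffs above.
# HotReload and demo_plugin are stand-ins; the real project imports them from
# toolbox / crazy_functions.*.
def HotReload(fn):
    # The real decorator re-imports the plugin module on each call so that
    # edited plugin code is picked up without restarting the app.
    def wrapper(*args, **kwargs):
        return fn(*args, **kwargs)
    return wrapper

def demo_plugin(txt, **kwargs):
    return f"received: {txt}"

function_plugins = {
    "示例插件": {
        "Color": "stop",                      # button color
        "AsButton": False,                    # show in the dropdown instead of as a button
        "AdvancedArgs": True,                 # open the advanced-argument textbox when selected
        "ArgsReminder": "在此填写高级参数",    # hint shown in the advanced-argument textbox
        "Function": HotReload(demo_plugin),
    },
}

print(function_plugins["示例插件"]["Function"]("hello"))
```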
@@ -3,9 +3,7 @@ from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip
 from functools import partial
 import glob, os, requests, time
 pj = os.path.join
-# ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
-# ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
-ARXIV_CACHE_DIR = os.getenv("Arxiv_Cache")
+ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
 
 # =================================== 工具函数 ===============================================
 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
@@ -192,9 +190,9 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
 
 
     # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
-    if not os.path.exists(project_folder + '/merge_proofread.tex'):
+    if not os.path.exists(project_folder + '/merge_proofread_en.tex'):
         yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
-                                      chatbot, history, system_prompt, mode='proofread_latex', switch_prompt=switch_prompt)
+                                      chatbot, history, system_prompt, mode='proofread_en', switch_prompt=_switch_prompt_)
 
 
     # <-------------- compile PDF ------------->
@@ -193,9 +193,8 @@ def test_Latex():
     # txt = r"https://arxiv.org/abs/2212.10156"
     # txt = r"https://arxiv.org/abs/2211.11559"
     # txt = r"https://arxiv.org/abs/2303.08774"
-    # txt = r"https://arxiv.org/abs/2303.12712"
+    txt = r"https://arxiv.org/abs/2303.12712"
     # txt = r"C:\Users\fuqingxu\arxiv_cache\2303.12712\workfolder"
-    txt = r"C:\Users\fuqingxu\Desktop\9"
 
 
     for cookies, cb, hist, msg in (Latex翻译中文并重新编译PDF)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
@@ -449,10 +449,9 @@ class LatexPaperSplit():
     """
     def __init__(self) -> None:
         self.nodes = None
-        self.msg = "{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \
+        self.msg = "*{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \
             "版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \
-            "项目Github地址: \\url{https://github.com/binary-husky/gpt_academic/}。" + \
-            "项目在线体验地址: \\url{https://chatpaper.org}。"
+            "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
         # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者)
         self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"
 
@@ -1,5 +1,5 @@
 from toolbox import CatchException, report_execption, write_results_to_file
-from toolbox import update_ui, promote_file_to_downloadzone
+from toolbox import update_ui
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
 from .crazy_utils import read_and_clean_pdf_text
@@ -147,14 +147,23 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
         print('writing html result failed:', trimmed_format_exc())
 
     # 准备文件的下载
+    import shutil
     for pdf_path in generated_conclusion_files:
         # 重命名文件
-        rename_file = f'翻译-{os.path.basename(pdf_path)}'
-        promote_file_to_downloadzone(pdf_path, rename_file=rename_file, chatbot=chatbot)
+        rename_file = f'./gpt_log/翻译-{os.path.basename(pdf_path)}'
+        if os.path.exists(rename_file):
+            os.remove(rename_file)
+        shutil.copyfile(pdf_path, rename_file)
+        if os.path.exists(pdf_path):
+            os.remove(pdf_path)
     for html_path in generated_html_files:
         # 重命名文件
-        rename_file = f'翻译-{os.path.basename(html_path)}'
-        promote_file_to_downloadzone(html_path, rename_file=rename_file, chatbot=chatbot)
+        rename_file = f'./gpt_log/翻译-{os.path.basename(html_path)}'
+        if os.path.exists(rename_file):
+            os.remove(rename_file)
+        shutil.copyfile(html_path, rename_file)
+        if os.path.exists(html_path):
+            os.remove(html_path)
     chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files)))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
@@ -13,11 +13,11 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
         web_port 当前软件运行的端口号
     """
     history = []    # 清空历史,以免输入溢出
-    chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
+    chatbot.append((txt, "正在同时咨询gpt-3.5和gpt-4……"))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
 
     # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
-    llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
+    llm_kwargs['llm_model'] = 'gpt-3.5-turbo&gpt-4' # 支持任意数量的llm接口,用&符号分隔
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
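As the comment in this hunk notes, the llm_model string may name any number of backends separated by '&'. A minimal sketch of how such a string could be fanned out to the individual models; query_model here is a hypothetical stand-in, not the project's real dispatch code:

```python
# Illustrative only: fan one prompt out to several backends named in an
# '&'-separated llm_model string, per the convention described in the comment.
def query_model(model, prompt):
    # Hypothetical stand-in for the real per-model request function.
    return f"[{model}] echo: {prompt}"

def ask_all(llm_model, prompt):
    models = [m.strip() for m in llm_model.split('&') if m.strip()]
    return {m: query_model(m, prompt) for m in models}

print(ask_all('gpt-3.5-turbo&gpt-4', '你好'))
```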
@@ -104,7 +104,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
         meta_paper_info_list = meta_paper_info_list[batchsize:]
 
     chatbot.append(["状态?",
-        "已经全部完成,您可以试试让AI写一个Related Works,例如您可以继续输入Write a \"Related Works\" section about \"你搜索的研究领域\" for me."])
+        "已经全部完成,您可以试试让AI写一个Related Works,例如您可以继续输入Write an academic \"Related Works\" section about \"你搜索的研究领域\" for me."])
     msg = '正常'
     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
     res = write_results_to_file(history)
Binary file not shown.
@@ -96,6 +96,15 @@
 
 ● 部署名(不是模型名)
 
+# 修改 config.py
+
+```
+AZURE_ENDPOINT = "填入终结点"
+AZURE_API_KEY = "填入azure openai api的密钥"
+AZURE_API_VERSION = "2023-05-15" # 默认使用 2023-05-15 版本,无需修改
+AZURE_ENGINE = "填入部署名"
+
+```
 # API的使用
 
 接下来就是具体怎么使用API了,还是可以参考官方文档:[快速入门 - 开始通过 Azure OpenAI 服务使用 ChatGPT 和 GPT-4 - Azure OpenAI Service | Microsoft Learn](https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/chatgpt-quickstart?pivots=programming-language-python)
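For context, a minimal sketch of calling the Azure deployment with those config values, following the pattern in the linked Microsoft quickstart; it assumes the pre-1.0 openai Python package, and all endpoint, key, and deployment values are placeholders:

```python
# Illustrative sketch only (openai<1.0 style, per the linked Azure quickstart).
# The AZURE_* values mirror the config.py entries shown above; fill in your own.
import openai

AZURE_ENDPOINT = "https://<your-resource>.openai.azure.com/"  # 终结点
AZURE_API_KEY = "<your-azure-openai-key>"
AZURE_API_VERSION = "2023-05-15"
AZURE_ENGINE = "<your-deployment-name>"  # 部署名(不是模型名)

openai.api_type = "azure"
openai.api_base = AZURE_ENDPOINT
openai.api_version = AZURE_API_VERSION
openai.api_key = AZURE_API_KEY

response = openai.ChatCompletion.create(
    engine=AZURE_ENGINE,  # Azure uses the deployment name rather than the model name
    messages=[{"role": "user", "content": "你好"}],
)
print(response["choices"][0]["message"]["content"])
```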
@@ -152,7 +152,7 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },
 
-    # chatglm
+    # 将 chatglm 直接对齐到 chatglm2
     "chatglm": {
         "fn_with_ui": chatglm_ui,
         "fn_without_ui": chatglm_noui,
@@ -161,6 +161,15 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+    "chatglm2": {
+        "fn_with_ui": chatglm_ui,
+        "fn_without_ui": chatglm_noui,
+        "endpoint": None,
+        "max_token": 1024,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
 
     # newbing
     "newbing": {
         "fn_with_ui": newbing_ui,
@@ -40,12 +40,12 @@ class GetGLMHandle(Process):
         while True:
             try:
                 if self.chatglm_model is None:
-                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
                     device, = get_conf('LOCAL_MODEL_DEVICE')
                     if device=='cpu':
-                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).float()
                     else:
-                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
                     self.chatglm_model = self.chatglm_model.eval()
                     break
                 else:
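A minimal standalone sketch of the same load-and-chat flow with chatglm2-6b via transformers, mirroring the hunk above; the .chat helper is provided by the model's trust_remote_code implementation as described on its model card, and the prompt is just an example:

```python
# Illustrative standalone sketch of loading THUDM/chatglm2-6b, mirroring the diff above.
# GPU is assumed here; use .float() instead of .half().cuda() for CPU, as in the diff.
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
model = model.eval()

# .chat() comes from the model's remote code (per the model card) and returns
# the reply plus the updated conversation history.
response, history = model.chat(tokenizer, "你好", history=[])
print(response)
```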
@@ -1,4 +1,3 @@
-./docs/gradio-3.32.2-py3-none-any.whl
 tiktoken>=0.3.3
 requests[socks]
 transformers
@@ -1,10 +0,0 @@
-<!-- baidu_stats.html -->
-<script>
-var _hmt = _hmt || [];
-(function() {
-  var hm = document.createElement("script");
-  hm.src = "https://hm.baidu.com/hm.js?208673d55832a94b9bbe10b1f4e70c09";
-  var s = document.getElementsByTagName("script")[0];
-  s.parentNode.insertBefore(hm, s);
-})();
-</script>