From fe16cc76a7490cf22ba858a92d16e8241662ec6f Mon Sep 17 00:00:00 2001
From: w_xiaolizu
Date: Tue, 13 Jun 2023 15:53:44 +0800
Subject: [PATCH] =?UTF-8?q?config=20=E9=BB=98=E8=AE=A4=E9=85=8D=E7=BD=AE?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 config.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/config.py b/config.py
index 116e4ee..09517c1 100644
--- a/config.py
+++ b/config.py
@@ -56,12 +56,16 @@ MAX_RETRY = 2
 
 # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 同时它必须被包含在AVAIL_LLM_MODELS切换列表中 )
 LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "newbing-free", "stack-claude"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", 'proxy-gpt-4', "api2d-gpt-4", "chatglm", "moss", "newbing", "newbing-free", "stack-claude"]
 # P.S. 其他可用的模型还包括 ["newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
 
 # 本地LLM模型如ChatGLM的执行方式 CPU/GPU
 LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
 
+# OpenAI的API_URL
+API_URL = "https://api.openai.com/v1/chat/completions"
+PROXY_API_URL = '' # 你的网关应用
+
 # 设置gradio的并行线程数(不需要修改)
 CONCURRENT_COUNT = 100
 