Add zh_langchain to dependency files

kainstan
2023-06-06 09:37:04 +08:00
parent 0a83ba91e9
commit 6d7ee17dbd
2 changed files with 111 additions and 93 deletions

View File

@@ -6,12 +6,14 @@ def input_clipping(inputs, history, max_token_limit):
    import numpy as np
    from request_llm.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']
-    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
+    def get_token_num(txt):
+        return len(enc.encode(txt, disallowed_special=()))
    mode = 'input-and-history'
    # When the input's share of tokens is less than half of the total, clip only the history
    input_token_num = get_token_num(inputs)
-    if input_token_num < max_token_limit//2:
+    if input_token_num < max_token_limit // 2:
        mode = 'only-history'
        max_token_limit = max_token_limit - input_token_num
@@ -24,7 +26,7 @@ def input_clipping(inputs, history, max_token_limit):
    while n_token > max_token_limit:
        where = np.argmax(everything_token)
        encoded = enc.encode(everything[where], disallowed_special=())
-        clipped_encoded = encoded[:len(encoded)-delta]
+        clipped_encoded = encoded[:len(encoded) - delta]
        everything[where] = enc.decode(clipped_encoded)[:-1]  # -1 to remove the may-be illegal char
        everything_token[where] = get_token_num(everything[where])
        n_token = get_token_num('\n'.join(everything))
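
The loop above does greedy clipping: find the segment with the most tokens, trim delta tokens off its tail, and repeat until the joined text fits the budget. A minimal self-contained sketch of the same strategy, with tiktoken standing in for the project's model_info tokenizer table (the sample strings and the 100-token limit are illustrative only):

import numpy as np
import tiktoken

enc = tiktoken.get_encoding("cl100k_base")  # stand-in for model_info[...]['tokenizer']

def get_token_num(txt):
    return len(enc.encode(txt, disallowed_special=()))

def clip_to_limit(segments, max_token_limit, delta=16):
    # Greedily trim the longest segment until the joined text fits the budget.
    token_counts = [get_token_num(s) for s in segments]
    while get_token_num('\n'.join(segments)) > max_token_limit:
        where = int(np.argmax(token_counts))
        encoded = enc.encode(segments[where], disallowed_special=())
        # Also drop the last decoded char: it may be a broken multi-byte char.
        segments[where] = enc.decode(encoded[:len(encoded) - delta])[:-1]
        token_counts[where] = get_token_num(segments[where])
    return segments

print(clip_to_limit(["long history " * 200, "short input"], max_token_limit=100))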
@@ -42,7 +44,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
        chatbot, history, sys_prompt, refresh_interval=0.2,
        handle_token_exceed=True,
        retry_times_at_unknown_error=2,
-        ):
+):
    """
    Request the GPT model while keeping the user interface alive.
@@ -75,7 +77,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
        exceeded_cnt = 0
        while True:
            # watchdog error
-            if len(mutable) >= 2 and (time.time()-mutable[1]) > 5:
+            if len(mutable) >= 2 and (time.time() - mutable[1]) > 5:
                raise RuntimeError("检测到程序终止。")
            try:
                # [Case 1]: completed successfully
@@ -92,7 +94,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
                    p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
                    MAX_TOKEN = 4096
                    EXCEED_ALLO = 512 + 512 * exceeded_cnt
-                    inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
+                    inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN - EXCEED_ALLO)
                    mutable[0] += f'[Local Message] 警告文本过长将进行截断Token溢出数{n_exceed}\n\n'
                    continue  # go back and retry
                else:
@@ -107,7 +109,8 @@ def request_gpt_model_in_new_thread_with_ui_alive(
                mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback\n\n{tb_str}\n\n"
                if retry_op > 0:
                    retry_op -= 1
-                    mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}\n\n"
+                    mutable[
+                        0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error - retry_op}/{retry_times_at_unknown_error}\n\n"
                    if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
                        time.sleep(30)
                    time.sleep(5)
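
Both this branch and the multi-threaded variant below apply the same policy: retry a bounded number of times, sleeping much longer when the error text indicates rate limiting. A stripped-down sketch of that policy, detached from the UI plumbing (the request callable and error strings are placeholders):

import time

def request_with_retry(do_request, max_retries=2):
    retries_left = max_retries
    while True:
        try:
            return do_request()
        except Exception as e:
            if retries_left <= 0:
                raise
            retries_left -= 1
            # Back off harder when the provider is rate-limiting us.
            if "Rate limit reached" in str(e) or "Too Many Requests" in str(e):
                time.sleep(30)
            time.sleep(5)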
@@ -140,7 +143,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
        handle_token_exceed=True, show_user_at_complete=False,
        retry_times_at_unknown_error=2,
-        ):
+):
    """
    Request GPT model using multiple threads with UI and high efficiency
    The [multi-threaded] version of the GPT model request.
@@ -174,8 +177,10 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
    assert len(inputs_array) == len(history_array)
    assert len(inputs_array) == len(sys_prompt_array)
    if max_workers == -1:  # read the config file
-        try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
-        except: max_workers = 8
+        try:
+            max_workers, = get_conf('DEFAULT_WORKER_NUM')
+        except:
+            max_workers = 8
    if max_workers <= 0: max_workers = 3
    # Disable multi-threading for chatglm; it can cause severe stalls
    if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-')):
@@ -197,7 +202,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        mutable[index][2] = "执行中"
        while True:
            # watchdog error
-            if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > 5:
+            if len(mutable[index]) >= 2 and (time.time() - mutable[index][1]) > 5:
                raise RuntimeError("检测到程序终止。")
            try:
                # [Case 1]: completed successfully
@@ -217,7 +222,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                    p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
                    MAX_TOKEN = 4096
                    EXCEED_ALLO = 512 + 512 * exceeded_cnt
-                    inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
+                    inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN - EXCEED_ALLO)
                    gpt_say += f'[Local Message] 警告文本过长将进行截断Token溢出数{n_exceed}\n\n'
                    mutable[index][2] = f"截断重试"
                    continue  # go back and retry
@@ -246,9 +251,11 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                        fail_info = ""
                    # Maybe things will improve after waiting a dozen seconds or so
                    for i in range(wait):
-                        mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
+                        mutable[index][2] = f"{fail_info}等待重试 {wait - i}";
+                        time.sleep(1)
                    # start retrying
-                    mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
+                    mutable[index][
+                        2] = f"重试中 {retry_times_at_unknown_error - retry_op}/{retry_times_at_unknown_error}"
                    continue  # go back and retry
                else:
                    mutable[index][2] = "已失败"
@@ -257,7 +264,8 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                    return gpt_say  # give up
    # asynchronous tasks start
-    futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
+    futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in
+               zip(
        range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
    cnt = 0
    while True:
@@ -272,16 +280,16 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
            mutable[thread_index][1] = time.time()
        # print something fun on the frontend
        for thread_index, _ in enumerate(worker_done):
-            print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
+            print_something_really_funny = "[ ...`" + mutable[thread_index][0][-scroller_max_len:]. \
                replace('\n', '').replace('```', '...').replace(
-                    ' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
+                ' ', '.').replace('<br/>', '.....').replace('$', '.') + "`... ]"
            observe_win.append(print_something_really_funny)
        # print something fun on the frontend
        stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
                            if not done else f'`{mutable[thread_index][2]}`\n\n'
                            for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
        # print something fun on the frontend
-        chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
+        chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.'] * (cnt % 10 + 1))]
        yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
        if all(worker_done):
            executor.shutdown()
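
The function around this hunk follows a submit-then-poll pattern: one worker per input goes into a ThreadPoolExecutor, workers report progress through shared per-thread state (mutable), and the UI loop polls that state instead of blocking on the futures. A stripped-down sketch of the pattern (the worker body, status strings, and poll interval are placeholders):

import time
from concurrent.futures import ThreadPoolExecutor

def run_with_live_status(tasks, max_workers=3, refresh_interval=0.2):
    # One shared status slot per task; workers write, the polling loop reads.
    status = [["pending"] for _ in tasks]

    def worker(index, task):
        status[index][0] = "running"
        result = task()  # placeholder for the real GPT request
        status[index][0] = "done"
        return result

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(worker, i, t) for i, t in enumerate(tasks)]
        while not all(f.done() for f in futures):
            print(" | ".join(s[0] for s in status))  # stand-in for update_ui
            time.sleep(refresh_interval)
    return [f.result() for f in futures]

print(run_with_live_status([lambda: 1, lambda: 2]))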
@@ -311,6 +319,7 @@ def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
        lines = txt_tocut.split('\n')
        estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
        estimated_line_cut = int(estimated_line_cut)
+        cnt = 0
        for cnt in reversed(range(estimated_line_cut)):
            if must_break_at_empty_line:
                if lines[cnt] != "":
@@ -327,6 +336,7 @@ def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
            result = [prev]
            result.extend(cut(post, must_break_at_empty_line))
            return result
+
    try:
        return cut(txt, must_break_at_empty_line=True)
    except RuntimeError:
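
breakdown_txt_to_satisfy_token_limit splits recursively at line boundaries, first insisting on blank-line breaks and then falling back to any newline. A simplified self-contained sketch of the recursion (it drops the blank-line preference and the estimated starting index, and the whitespace word counter is a crude stand-in for a real tokenizer):

def breakdown_to_limit(txt, get_token_fn, limit):
    # Recursively split txt at a line boundary until every piece fits the limit.
    def cut(txt_tocut):
        if get_token_fn(txt_tocut) <= limit:
            return [txt_tocut]
        lines = txt_tocut.split('\n')
        # Take the largest prefix of lines that fits, then recurse on the rest.
        for cnt in reversed(range(1, len(lines))):
            prev = '\n'.join(lines[:cnt])
            if get_token_fn(prev) <= limit:
                return [prev] + cut('\n'.join(lines[cnt:]))
        raise RuntimeError("a single line exceeds the token limit")
    return cut(txt)

token_count = lambda s: len(s.split())  # one "token" per whitespace-separated word
print(breakdown_to_limit("one two three\nfour five\nsix", token_count, limit=3))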
@@ -342,6 +352,7 @@ def force_breakdown(txt, limit, get_token_fn):
            return txt[:i], txt[i:]
    return "Tiktoken未知错误", "Tiktoken未知错误"
+

def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
    # recursive
    def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
@@ -370,6 +381,7 @@ def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
            result = [prev]
            result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
            return result
+
    try:
        # 1st attempt: use double blank lines ("\n\n") as the split points
        return cut(txt, must_break_at_empty_line=True)
@@ -392,7 +404,6 @@ def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
            return cut(txt, must_break_at_empty_line=False, break_anyway=True)
-

def read_and_clean_pdf_text(fp):
    """
    This function splits PDF files; it relies on many tricks and the logic is messy, but the results are surprisingly good.
@@ -422,6 +433,7 @@ def read_and_clean_pdf_text(fp):
    fb = 2  # Index 2: bounding box
    REMOVE_FOOT_NOTE = True  # whether to discard non-body content (smaller than the body font: references, footnotes, figure captions, ...)
    REMOVE_FOOT_FFSIZE_PERCENT = 0.95  # fonts smaller than this fraction of the body size count as non-body (body font size is not always 100% uniform; there are tiny, invisible variations)
+
    def primary_ffsize(l):
        """
        Extract the primary font size of a text block
@@ -432,11 +444,11 @@ def read_and_clean_pdf_text(fp):
                fsize_statiscs[wtf['size']] += len(wtf['text'])
        return max(fsize_statiscs, key=fsize_statiscs.get)

-    def ffsize_same(a,b):
+    def ffsize_same(a, b):
        """
        Check whether two font sizes are approximately equal
        """
-        return abs((a-b)/max(a,b)) < 0.02
+        return abs((a - b) / max(a, b)) < 0.02

    with fitz.open(fp) as doc:
        meta_txt = []
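
ffsize_same treats two sizes as equal when their relative difference is under 2%, which absorbs the tiny font-size jitter that REMOVE_FOOT_FFSIZE_PERCENT alludes to. A quick check with made-up sizes:

def ffsize_same(a, b):
    # Relative difference under 2% counts as "the same" font size.
    return abs((a - b) / max(a, b)) < 0.02

assert ffsize_same(10.0, 10.15)      # ~1.5% apart: same body text
assert not ffsize_same(10.0, 12.0)   # ~17% apart: likely a heading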
@@ -459,7 +471,8 @@ def read_and_clean_pdf_text(fp):
                    for wtf in l['spans']:  # for l in t['lines']:
                        meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
                # meta_line.append(["NEW_BLOCK", pf])
-                # block extraction: for each word segment within a line, for each line, cross-line words for each block
+                # block extraction: for each word segment within a line, for each line
+                # cross-line words for each block
                meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
                    '- ', '') for t in text_areas['blocks'] if 'lines' in t])
                meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
@@ -487,18 +500,19 @@ def read_and_clean_pdf_text(fp):
            if REMOVE_FOOT_NOTE:
                if meta_line[index][fs] <= give_up_fize_threshold:
                    continue
-            if ffsize_same(meta_line[index][fs], meta_line[index-1][fs]):
+            if ffsize_same(meta_line[index][fs], meta_line[index - 1][fs]):
                # try to identify paragraphs
-                if meta_line[index][fc].endswith('.') and\
-                        (meta_line[index-1][fc] != 'NEW_BLOCK') and \
-                        (meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7:
+                if meta_line[index][fc].endswith('.') and \
+                        (meta_line[index - 1][fc] != 'NEW_BLOCK') and \
+                        (meta_line[index][fb][2] - meta_line[index][fb][0]) < (
+                        meta_line[index - 1][fb][2] - meta_line[index - 1][fb][0]) * 0.7:
                    sec[-1] += line[fc]
                    sec[-1] += "\n\n"
                else:
                    sec[-1] += " "
                    sec[-1] += line[fc]
            else:
-                if (index+1 < len(meta_line)) and \
+                if (index + 1 < len(meta_line)) and \
                        meta_line[index][fs] > main_fsize:
                    # single line + large font
                    mega_sec.append(copy.deepcopy(sec))
@@ -506,7 +520,7 @@ def read_and_clean_pdf_text(fp):
                    sec.append("# " + line[fc])
                else:
                    # try to identify a section
-                    if meta_line[index-1][fs] > meta_line[index][fs]:
+                    if meta_line[index - 1][fs] > meta_line[index][fs]:
                        sec.append("\n" + line[fc])
                    else:
                        sec.append(line[fc])
@@ -525,13 +539,15 @@ def read_and_clean_pdf_text(fp):
            if len(block_txt) < 100:
                meta_txt[index] = '\n'
        return meta_txt
+
    meta_txt = 把字符太少的块清除为回车(meta_txt)
+
    def 清理多余的空行(meta_txt):
        for index in reversed(range(1, len(meta_txt))):
-            if meta_txt[index] == '\n' and meta_txt[index-1] == '\n':
+            if meta_txt[index] == '\n' and meta_txt[index - 1] == '\n':
                meta_txt.pop(index)
        return meta_txt
    meta_txt = 清理多余的空行(meta_txt)

    def 合并小写开头的段落块(meta_txt):
@@ -542,16 +558,18 @@ def read_and_clean_pdf_text(fp):
                return True
            else:
                return False
+
        for _ in range(100):
            for index, block_txt in enumerate(meta_txt):
                if starts_with_lowercase_word(block_txt):
-                    if meta_txt[index-1] != '\n':
-                        meta_txt[index-1] += ' '
+                    if meta_txt[index - 1] != '\n':
+                        meta_txt[index - 1] += ' '
                    else:
-                        meta_txt[index-1] = ''
-                    meta_txt[index-1] += meta_txt[index]
+                        meta_txt[index - 1] = ''
+                    meta_txt[index - 1] += meta_txt[index]
                    meta_txt[index] = '\n'
        return meta_txt
+
    meta_txt = 合并小写开头的段落块(meta_txt)
    meta_txt = 清理多余的空行(meta_txt)
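
These cleanup passes are plain list rewrites: blocks under 100 characters become bare newlines, consecutive newline blocks collapse into one, and a block starting with a lowercase word is glued onto the previous block. A compact sketch of the last two passes, with the Chinese helper names translated (the sample blocks are made up):

def drop_redundant_blank_lines(blocks):  # 清理多余的空行
    # Collapse consecutive '\n' blocks into one.
    for index in reversed(range(1, len(blocks))):
        if blocks[index] == '\n' and blocks[index - 1] == '\n':
            blocks.pop(index)
    return blocks

def merge_lowercase_paragraphs(blocks):  # 合并小写开头的段落块
    # A block starting with a lowercase word continues the previous block.
    for index in range(1, len(blocks)):
        first_word = blocks[index].split(' ')[0]
        if first_word and first_word.islower():
            blocks[index - 1] = blocks[index - 1].rstrip('\n') + ' ' + blocks[index]
            blocks[index] = '\n'
    return blocks

blocks = ["A sentence that was", "split across two blocks.", "\n", "\n", "Next paragraph."]
print(drop_redundant_blank_lines(merge_lowercase_paragraphs(blocks)))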
@@ -593,9 +611,10 @@ def get_files_from_everything(txt, type):  # type='.md'
        from toolbox import get_conf
        proxies, = get_conf('proxies')
        r = requests.get(txt, proxies=proxies)
-        with open('./gpt_log/temp'+type, 'wb+') as f: f.write(r.content)
+        with open('./gpt_log/temp' + type, 'wb+') as f:
+            f.write(r.content)
        project_folder = './gpt_log/'
-        file_manifest = ['./gpt_log/temp'+type]
+        file_manifest = ['./gpt_log/temp' + type]
    elif txt.endswith(type):
        # the file is given directly
        file_manifest = [txt]
@@ -603,7 +622,7 @@ def get_files_from_everything(txt, type):  # type='.md'
    elif os.path.exists(txt):
        # local path, search recursively
        project_folder = txt
-        file_manifest = [f for f in glob.glob(f'{project_folder}/**/*'+type, recursive=True)]
+        file_manifest = [f for f in glob.glob(f'{project_folder}/**/*' + type, recursive=True)]
        if len(file_manifest) == 0:
            success = False
    else:
@@ -614,8 +633,6 @@ def get_files_from_everything(txt, type):  # type='.md'
    return success, file_manifest, project_folder
-
-
def Singleton(cls):
    _instance = {}
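
get_files_from_everything resolves a user-typed string into a file list, whether it is a URL (downloaded to ./gpt_log/), a single file, or a folder searched recursively. A hedged usage sketch (the paths are made up, and the function is assumed to be importable from this module):

# Hypothetical call sites: the input may be a URL, a file, or a directory.
for source in ["https://example.com/notes.md", "./docs/readme.md", "./docs"]:
    success, file_manifest, project_folder = get_files_from_everything(source, type='.md')
    if success:
        print(f"{source}: {len(file_manifest)} file(s) under {project_folder}")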
@@ -647,7 +664,6 @@ class knowledge_archive_interface():
        return self.text2vec_large_chinese
-
    def feed_archive(self, file_manifest, id="default"):
        self.threadLock.acquire()
        # import uuid
@@ -660,7 +676,7 @@ class knowledge_archive_interface():
            history=[],
            one_conent="",
            one_content_segmentation="",
-            text2vec = self.get_chinese_text2vec(),
+            text2vec=self.get_chinese_text2vec(),
        )
        self.threadLock.release()
@@ -682,23 +698,24 @@ class knowledge_archive_interface():
            history=[],
            one_conent="",
            one_content_segmentation="",
-            text2vec = self.get_chinese_text2vec(),
+            text2vec=self.get_chinese_text2vec(),
        )
        VECTOR_SEARCH_SCORE_THRESHOLD = 0
        VECTOR_SEARCH_TOP_K = 4
        CHUNK_SIZE = 512
        resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
-            query = txt,
-            vs_path = self.kai_path,
+            query=txt,
+            vs_path=self.kai_path,
            score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
            vector_search_top_k=VECTOR_SEARCH_TOP_K,
            chunk_conent=True,
            chunk_size=CHUNK_SIZE,
-            text2vec = self.get_chinese_text2vec(),
+            text2vec=self.get_chinese_text2vec(),
        )
        self.threadLock.release()
        return resp, prompt
+

def try_install_deps(deps):
    for dep in deps:
        import subprocess, sys
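
try_install_deps shells out to pip once per dependency, which is presumably how zh_langchain gets pulled in on demand. A minimal sketch of that pattern, assuming the conventional python -m pip invocation (the diff above only shows the imports):

import subprocess
import sys

def try_install_deps(deps):
    # Best-effort install: one pip call per dependency; failures are reported, not fatal.
    for dep in deps:
        try:
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', dep])
        except subprocess.CalledProcessError:
            print(f"failed to install {dep}; continuing")

try_install_deps(['zh_langchain'])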

View File

@@ -17,3 +17,4 @@ numpy
arxiv
rich
langchain
+zh_langchain
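
With zh_langchain listed in the requirements file, the knowledge-archive features can count on it at import time. A guarded-import sketch for environments installed before this commit (assuming the import name matches the PyPI name, which this diff does not confirm):

import importlib
import subprocess
import sys

try:
    importlib.import_module('zh_langchain')  # assumed import name
except ImportError:
    # Mirror try_install_deps: fall back to installing on demand.
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'zh_langchain'])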