Add zh_langchain into dependent files

Most hunks below are mechanical PEP 8 reflows (long statements split across lines, blank lines normalized); the one functional change is the zh_langchain entry appended to the dependency list at the end.
@@ -6,7 +6,9 @@ def input_clipping(inputs, history, max_token_limit):
     import numpy as np
     from request_llm.bridge_all import model_info
     enc = model_info["gpt-3.5-turbo"]['tokenizer']
-    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
+
+    def get_token_num(txt):
+        return len(enc.encode(txt, disallowed_special=()))
 
     mode = 'input-and-history'
     # when the input's share of tokens is less than half of the total, trim only the history
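A note on the counter being reformatted here: the tokenizer comes from the project's model_info registry; outside this repo an equivalent counter can be built directly on tiktoken (assumed installed for this sketch):

    import tiktoken

    enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

    def get_token_num(txt):
        # disallowed_special=() makes encode() treat special tokens as plain
        # text instead of raising an error on them
        return len(enc.encode(txt, disallowed_special=()))

    print(get_token_num("hello world"))  # -> 2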
@@ -107,7 +109,8 @@ def request_gpt_model_in_new_thread_with_ui_alive(
             mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
             if retry_op > 0:
                 retry_op -= 1
-                mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}:\n\n"
+                mutable[
+                    0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error - retry_op}/{retry_times_at_unknown_error}:\n\n"
                 if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
                     time.sleep(30)
                 time.sleep(5)
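This hunk only rewraps the retry path: on a rate-limit error the thread sleeps 30 s, otherwise 5 s, before retrying. A minimal standalone sketch of the same pattern (the names here are illustrative, not from the repo):

    import time

    def call_with_retry(fn, retries=3):
        for attempt in range(1, retries + 1):
            try:
                return fn()
            except Exception as e:
                if attempt == retries:
                    raise
                # rate-limit errors get a longer cool-down, mirroring the diff
                is_rate_limit = ("Rate limit reached" in str(e)
                                 or "Too Many Requests" in str(e))
                time.sleep(30 if is_rate_limit else 5)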
@@ -174,8 +177,10 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
     assert len(inputs_array) == len(history_array)
     assert len(inputs_array) == len(sys_prompt_array)
     if max_workers == -1:  # read the config file
-        try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
-        except: max_workers = 8
+        try:
+            max_workers, = get_conf('DEFAULT_WORKER_NUM')
+        except:
+            max_workers = 8
         if max_workers <= 0: max_workers = 3
     # disable multi-threading for chatglm, which could cause severe lag
     if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-')):
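The one-line try/except reading DEFAULT_WORKER_NUM becomes four lines; behavior is unchanged: -1 means "consult the config", a failed read falls back to 8, and non-positive values are clamped to 3. A self-contained sketch, using an environment variable in place of the project's get_conf (an assumption for illustration):

    import os

    def resolve_worker_num(max_workers):
        if max_workers == -1:  # read the config
            try:
                max_workers = int(os.environ["DEFAULT_WORKER_NUM"])
            except Exception:
                max_workers = 8
        if max_workers <= 0:
            max_workers = 3
        return max_workers

    print(resolve_worker_num(-1))  # 8 unless DEFAULT_WORKER_NUM is set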
@@ -246,9 +251,11 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                     fail_info = ""
                 # things may improve after waiting a dozen or so seconds
                 for i in range(wait):
-                    mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
+                    mutable[index][2] = f"{fail_info}等待重试 {wait - i}";
+                    time.sleep(1)
                 # start retrying
-                mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
+                mutable[index][
+                    2] = f"重试中 {retry_times_at_unknown_error - retry_op}/{retry_times_at_unknown_error}"
                 continue  # go back and retry
             else:
                 mutable[index][2] = "已失败"
@@ -257,7 +264,8 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                 return gpt_say  # give up
 
     # async tasks start here
-    futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
-        range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
+    futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in
+               zip(
+                   range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
     cnt = 0
     while True:
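The reflowed comprehension still submits one _req_gpt task per (input, history, sys_prompt) triple. Stripped of the GPT specifics, the submission pattern looks like this (the worker body is a stand-in):

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def _req_gpt(index, inputs, history, sys_prompt):
        return f"task {index} got: {inputs}"  # stand-in for the real LLM call

    inputs_array, history_array, sys_prompt_array = ["a", "b"], [[], []], ["", ""]

    with ThreadPoolExecutor(max_workers=2) as executor:
        futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt)
                   for index, inputs, history, sys_prompt in
                   zip(range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
        for f in as_completed(futures):
            print(f.result())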
@@ -311,6 +319,7 @@ def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
         lines = txt_tocut.split('\n')
         estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
         estimated_line_cut = int(estimated_line_cut)
+        cnt = 0
         for cnt in reversed(range(estimated_line_cut)):
             if must_break_at_empty_line:
                 if lines[cnt] != "":
@@ -327,6 +336,7 @@ def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
             result = [prev]
             result.extend(cut(post, must_break_at_empty_line))
             return result
+
     try:
         return cut(txt, must_break_at_empty_line=True)
     except RuntimeError:
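The added cnt = 0 in the previous hunk guards the case where estimated_line_cut is 0 and the loop body never runs, leaving cnt unbound. The surrounding algorithm, reduced to a self-contained sketch (simplified: no empty-line requirement and no forced fallback):

    def breakdown(txt, get_token_fn, limit):
        if get_token_fn(txt) <= limit:
            return [txt]
        lines = txt.split('\n')
        # estimate where the limit falls, then scan backwards for a cut point
        estimated_line_cut = int(limit / get_token_fn(txt) * len(lines))
        for cnt in reversed(range(1, estimated_line_cut + 1)):
            prev = '\n'.join(lines[:cnt])
            if get_token_fn(prev) < limit:
                return [prev] + breakdown('\n'.join(lines[cnt:]), get_token_fn, limit)
        raise RuntimeError("no cut point found")

    # token counter stand-in: one token per word
    print(breakdown("one\ntwo\nthree\nfour", lambda s: len(s.split()), limit=2))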
@@ -342,6 +352,7 @@ def force_breakdown(txt, limit, get_token_fn):
             return txt[:i], txt[i:]
     return "Tiktoken未知错误", "Tiktoken未知错误"
 
+
 def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
     # recursive
     def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
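Only force_breakdown's two return statements fall inside this hunk. Judging from them, the function scans for the largest prefix that still fits under the limit; the loop header is outside the hunk, so the following reconstruction is an assumption:

    def force_breakdown(txt, limit, get_token_fn):
        # brute-force, character-level cut: take the longest prefix under the limit
        for i in reversed(range(len(txt))):
            if get_token_fn(txt[:i]) < limit:
                return txt[:i], txt[i:]
        return "Tiktoken未知错误", "Tiktoken未知错误"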
@@ -370,6 +381,7 @@ def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
             result = [prev]
             result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
             return result
+
     try:
         # 1st attempt: use double blank lines (\n\n) as split points
         return cut(txt, must_break_at_empty_line=True)
@@ -392,7 +404,6 @@ def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
         return cut(txt, must_break_at_empty_line=False, break_anyway=True)
 
 
-
 def read_and_clean_pdf_text(fp):
     """
     This function splits PDFs; it uses a lot of tricks and the logic is messy, but the results are surprisingly good
@@ -422,6 +433,7 @@ def read_and_clean_pdf_text(fp):
     fb = 2  # Index 2: bounding box
     REMOVE_FOOT_NOTE = True  # whether to discard non-body content (smaller than the body font, e.g. references, footnotes, figure captions)
     REMOVE_FOOT_FFSIZE_PERCENT = 0.95  # judged as non-body when smaller than this fraction of the body font (some papers' body font size is not 100% uniform, with tiny invisible variations)
+
     def primary_ffsize(l):
         """
         Extract the dominant font size of a text block
@@ -459,7 +471,8 @@ def read_and_clean_pdf_text(fp):
                 for wtf in l['spans']:  # for l in t['lines']:
                     meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
         # meta_line.append(["NEW_BLOCK", pf])
-        # block extraction: for each word segment within a line, for each line, cross-line words, for each block
+        # block extraction: for each word segment within a line, for each line
+        # cross-line words, for each block
         meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
             '- ', '') for t in text_areas['blocks'] if 'lines' in t])
         meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
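The nested comprehension walks PyMuPDF's "dict" page output: each block t has lines, each line l has spans, and each span (named wtf here) carries text and size. A standalone sketch of the same walk, assuming PyMuPDF (imported as fitz) and a hypothetical sample.pdf:

    import fitz  # PyMuPDF

    def block_texts(pdf_path):
        with fitz.open(pdf_path) as doc:
            for page in doc:
                text_areas = page.get_text("dict")
                for t in text_areas['blocks']:
                    if 'lines' not in t:
                        continue  # image blocks carry no text
                    # join spans within a line, lines within a block,
                    # and undo end-of-line hyphenation
                    yield " ".join("".join(wtf['text'] for wtf in l['spans'])
                                   for l in t['lines']).replace('- ', '')

    # for txt in block_texts("sample.pdf"): print(txt)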
@@ -491,7 +504,8 @@ def read_and_clean_pdf_text(fp):
             # try to detect paragraphs
             if meta_line[index][fc].endswith('.') and \
                     (meta_line[index - 1][fc] != 'NEW_BLOCK') and \
-                    (meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7:
+                    (meta_line[index][fb][2] - meta_line[index][fb][0]) < (
+                    meta_line[index - 1][fb][2] - meta_line[index - 1][fb][0]) * 0.7:
                 sec[-1] += line[fc]
                 sec[-1] += "\n\n"
             else:
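The rewrapped condition encodes a layout heuristic: a line that ends with a period, does not open a new block, and is noticeably shorter than the line above it (under 70% of its width, via the fb bounding boxes) most likely closes a paragraph. In isolation, with cur and prev as (text, (x0, y0, x1, y1)) pairs — a simplification of the meta_line records:

    def ends_paragraph(cur, prev):
        text, box = cur
        prev_text, prev_box = prev
        width, prev_width = box[2] - box[0], prev_box[2] - prev_box[0]
        return (text.endswith('.')
                and prev_text != 'NEW_BLOCK'
                and width < prev_width * 0.7)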
@@ -525,6 +539,7 @@ def read_and_clean_pdf_text(fp):
             if len(block_txt) < 100:
                 meta_txt[index] = '\n'
         return meta_txt
+
     meta_txt = 把字符太少的块清除为回车(meta_txt)
 
     def 清理多余的空行(meta_txt):
@@ -532,6 +547,7 @@ def read_and_clean_pdf_text(fp):
             if meta_txt[index] == '\n' and meta_txt[index - 1] == '\n':
                 meta_txt.pop(index)
         return meta_txt
+
     meta_txt = 清理多余的空行(meta_txt)
 
     def 合并小写开头的段落块(meta_txt):
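清理多余的空行 collapses runs of '\n' placeholder blocks left by earlier passes. It mutates the list while iterating; the loop header is outside the hunk, but walking backwards, as sketched here, is what keeps pop() from skipping elements:

    def remove_extra_blank_lines(blocks):
        # iterate backwards so pop() never shifts an index we still need
        for index in reversed(range(1, len(blocks))):
            if blocks[index] == '\n' and blocks[index - 1] == '\n':
                blocks.pop(index)
        return blocks

    print(remove_extra_blank_lines(['a', '\n', '\n', '\n', 'b']))  # ['a', '\n', 'b']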
@@ -542,6 +558,7 @@ def read_and_clean_pdf_text(fp):
                 return True
             else:
                 return False
+
         for _ in range(100):
             for index, block_txt in enumerate(meta_txt):
                 if starts_with_lowercase_word(block_txt):
@@ -552,6 +569,7 @@ def read_and_clean_pdf_text(fp):
                     meta_txt[index - 1] += meta_txt[index]
                     meta_txt[index] = '\n'
         return meta_txt
+
     meta_txt = 合并小写开头的段落块(meta_txt)
     meta_txt = 清理多余的空行(meta_txt)
 
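合并小写开头的段落块 undoes bad splits: a block whose first word starts lowercase is almost certainly a continuation of the previous block, so its text is folded back and the slot blanked to '\n' (which 清理多余的空行 then removes). The essence, as a sketch:

    def merge_lowercase_starts(blocks):
        for index in range(1, len(blocks)):
            first = blocks[index].lstrip()[:1]
            if first.islower():  # continuation of the previous paragraph
                blocks[index - 1] += blocks[index]
                blocks[index] = '\n'
        return blocks

    print(merge_lowercase_starts(['The model is ', 'trained on text.', 'New section.']))
    # ['The model is trained on text.', '\n', 'New section.']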
@@ -593,7 +611,8 @@ def get_files_from_everything(txt, type):  # type='.md'
         from toolbox import get_conf
         proxies, = get_conf('proxies')
         r = requests.get(txt, proxies=proxies)
-        with open('./gpt_log/temp'+type, 'wb+') as f: f.write(r.content)
+        with open('./gpt_log/temp' + type, 'wb+') as f:
+            f.write(r.content)
         project_folder = './gpt_log/'
         file_manifest = ['./gpt_log/temp' + type]
     elif txt.endswith(type):
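The download branch of get_files_from_everything, isolated: fetch the remote file through the configured proxies and stage it under ./gpt_log/ as temp<ext>. Roughly, with a makedirs guard added to keep the sketch self-contained:

    import os
    import requests

    def fetch_to_gpt_log(url, ext, proxies=None):
        os.makedirs('./gpt_log', exist_ok=True)
        r = requests.get(url, proxies=proxies)
        path = './gpt_log/temp' + ext
        with open(path, 'wb+') as f:
            f.write(r.content)
        return path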
@@ -614,8 +633,6 @@ def get_files_from_everything(txt, type):  # type='.md'
     return success, file_manifest, project_folder
 
 
-
-
 def Singleton(cls):
     _instance = {}
 
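Only Singleton's signature and its _instance dict fall inside this hunk. The conventional decorator this pattern implies looks like the following — a reconstruction, not necessarily the repo's exact body:

    def Singleton(cls):
        _instance = {}

        def _getter(*args, **kwargs):
            # create the single instance on first call, reuse it afterwards
            if cls not in _instance:
                _instance[cls] = cls(*args, **kwargs)
            return _instance[cls]
        return _getter

    @Singleton
    class Registry:
        pass

    assert Registry() is Registry()  # same object both times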
@@ -647,7 +664,6 @@ class knowledge_archive_interface():
 
         return self.text2vec_large_chinese
 
-
     def feed_archive(self, file_manifest, id="default"):
         self.threadLock.acquire()
         # import uuid
@@ -699,6 +715,7 @@ class knowledge_archive_interface():
         self.threadLock.release()
         return resp, prompt
 
+
 def try_install_deps(deps):
     for dep in deps:
         import subprocess, sys
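try_install_deps imports subprocess and sys inside its loop; the visible lines suggest a pip invocation through the running interpreter, along these lines (the exact flags are outside the hunk, so this is an assumption):

    import subprocess, sys

    def try_install_deps(deps):
        for dep in deps:
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', dep])

    # e.g. try_install_deps(['zh_langchain'])  # the dependency this commit adds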
The second file in the commit (a pip requirements list, judging by its contents) gains the new dependency:

@@ -17,3 +17,4 @@ numpy
 arxiv
 rich
 langchain
+zh_langchain