Update all slightly
@@ -1,78 +0,0 @@
import re
import json


class read_lawfile:
    def __init__(self, chapter_mode=r"第[零一二三四五六七八九十百千万]+章 .+\b", entry_mode=r"第[零一二三四五六七八九十百千万]+条\b"):
        # Regexes that recognize chapter headings ("第X章 ...") and article headings ("第X条").
        self.chapter_mode = chapter_mode
        self.entry_mode = entry_mode

    def read_file(self, file_path):
        # Read the law file and split it into chapters, then into articles.
        self.law = {}
        with open(file_path, encoding='utf-8') as f:
            content = f.read()
        content = content.replace("\n\n", "\n")
        content = content.replace("##", "")

        chapter_p = re.search(self.chapter_mode, content)
        while chapter_p is not None:
            c_start = chapter_p.start()
            c_end = chapter_p.end()
            key = content[c_start:c_end]
            content = content[c_end:]

            # Everything up to the next chapter heading belongs to this chapter.
            chapter_p = re.search(self.chapter_mode, content)
            if chapter_p is not None:
                end = chapter_p.start()
                c_content = content[:end]
                self.law[key] = self.read_entrys(c_content)
            else:
                self.law[key] = self.read_entrys(content)
        return self.law

    def read_entrys(self, content):
        # Split a chapter body into articles keyed by their "第X条" heading.
        entrys = {}
        entry_p = re.search(self.entry_mode, content)
        while entry_p is not None:
            e_start = entry_p.start()
            e_end = entry_p.end()
            key = content[e_start:e_end]
            content = content[e_end + 1:]

            entry_p = re.search(self.entry_mode, content)
            if entry_p is not None:
                end = entry_p.start()
                entrys[key] = content[:end]
            else:
                entrys[key] = content
        return entrys

    def show(self):
        for key in self.law:
            print(key, '\n')
            for item in self.law[key]:
                print(item, ' ', self.law[key][item])


if __name__ == '__main__':
    file_path = "D:/11496/Documents/project/Laws-master/经济法/价格法(1997-12-29).md"
    r = read_lawfile()
    law_dict = r.read_file(file_path)
    r.show()
    print(law_dict)
    with open('./a.json', 'w', encoding='utf-8') as f:
        json.dump(law_dict, f, ensure_ascii=False)
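A minimal usage sketch for the parser above (not part of the commit), assuming the input follows the same "第X章 ... / 第X条 ..." layout as the repository's law markdown files. The sample text and temporary file are hypothetical; it only illustrates the shape of the returned chapter → article dict.

import json
import tempfile

# Hypothetical sample; the real files live under Laws-master/.
sample = (
    "第一章 总则\n"
    "第一条 为了规范价格行为,制定本法。\n"
    "第二条 本法适用于中华人民共和国境内的价格行为。\n"
)

with tempfile.NamedTemporaryFile('w', suffix='.md', encoding='utf-8',
                                 delete=False) as tmp:
    tmp.write(sample)
    tmp_path = tmp.name

reader = read_lawfile()          # class defined above
law = reader.read_file(tmp_path)
print(json.dumps(law, ensure_ascii=False, indent=2))
# Expected shape: {"第一章 总则": {"第一条": "...", "第二条": "..."}}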
@@ -1,50 +0,0 @@
import argparse
import openai
import yaml


def return_random_prompt():
    # Build a Chinese prompt asking the model for 10 JSON-formatted
    # question-answer pairs grounded in the quoted statute (<Reference>).
    system_prompt = "你需要针对法条内容尽可能联想多样化的场景生成问答数据。我们将用于人工评估 ChatGPT 模型对指令的完成情况。要求:\n"

    # diversity requirement
    system_prompt += "1. 结合真实问题,表述多样化。\n"

    # other requirements
    system_prompt += "2. 如果遇到无法处理的指令(只靠文本无法回答),给出无法处理的回复。\n"
    system_prompt += "3. 除非特别要求,请使用中文,指令可以是命令句、疑问句、或其他合适的类型。\n"
    system_prompt += "4. <Reference>:违反本法规定,对妇女实施性骚扰的,由公安机关给予批评教育或者出具告诫书,并由所在单位依法给予处分。\n学校、用人单位违反本法规定,未采取必要措施预防和制止性骚扰,造成妇女权益受到侵害或者社会影响恶劣的,由上级机关或者主管部门责令改正;拒不改正或者情节严重的,依法对直接负责的主管人员和其他直接责任人员给予处分。\n"
    system_prompt += "5. <input>是结合法条内容联想到的真实场景下的问题。要求该场景下存在违法者和受害人\n"
    system_prompt += "6. <output>是结合法条内容对该问题的适当且真实的回应,不能只回复答应或拒绝请求。尽可能地指明违法行为可能遭受的惩罚,并向受害者提出维权建议。\n\n"
    system_prompt += "请给出满足条件的10条JSON格式数据:\n"

    return system_prompt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg_path', default='../config.yaml', type=str)
    parser.add_argument('--save_path', default='./output.json', type=str)
    args = parser.parse_args()

    with open(args.cfg_path, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)

    openai.api_key = cfg['API_KEY']
    openai.api_base = cfg['API_BASE_URL']

    # Number of API calls to make (each prompt asks for 10 JSON-formatted items).
    # TODO: write the output incrementally (or stream it), otherwise a mid-run
    # failure loses everything.
    MAX_EPOCHS = 1
    with open(args.save_path, 'w', encoding='utf-8') as output_file:
        for k in range(MAX_EPOCHS):
            response = openai.ChatCompletion.create(
                # `gpt-3.5-turbo` is used here, while Stanford-Alpaca uses `text-davinci-003`
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "user", "content": return_random_prompt()},
                ]
            )
            output_file.write(response["choices"][0]["message"]["content"] + '\n')
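One hedged way to address the TODO above (an assumption, not the project's code): retry transient API errors and flush each reply to disk as soon as it arrives, so a mid-run failure keeps partial output. It reuses the pre-1.0 openai.ChatCompletion interface already used by the script; the helper names are made up for illustration.

import time
import openai


def generate_with_retry(prompt, retries=3, delay=5):
    # Retry the chat completion a few times before giving up.
    for attempt in range(retries):
        try:
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": prompt}],
            )
            return response["choices"][0]["message"]["content"]
        except Exception:
            if attempt == retries - 1:
                raise
            time.sleep(delay)


def run(save_path, build_prompt, epochs=1):
    # Append and flush after every call so partial progress is preserved.
    with open(save_path, 'a', encoding='utf-8') as out:
        for _ in range(epochs):
            out.write(generate_with_retry(build_prompt()) + '\n')
            out.flush()

# Example wiring (names taken from the script above):
# run(args.save_path, return_random_prompt, MAX_EPOCHS)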
@@ -1,51 +0,0 @@
import argparse
import openai
import yaml
import random


def return_random_prompt():
    # Build a Chinese prompt asking the model for 20 JSON-formatted
    # instruction-response pairs about the quoted article (<input>).
    system_prompt = "你需要针对输入尽可能给出多样化的任务指令和对应的回答。我们将用于人工评估ChatGPT模型对指令的完成情况。要求:\n"

    # randomly sample task types to diversify the generated instructions
    task_list = ["开放式生成", "分类", "问答", "编辑", "摘要", "写作", "分析", "抽取"]
    system_prompt += "1. 表述多样化,结合真实问题;指令类型多样化,例如:" + "、".join(random.sample(task_list, 7)) + "等。\n"

    # other requirements
    system_prompt += "2. 如果遇到无法处理的指令(只靠文本无法回答),给出无法处理的回复。\n"
    system_prompt += "3. 除非特别要求,请使用中文,指令可以是命令句、疑问句、或其他合适的类型。\n"
    system_prompt += "4. <input>是:'第十三条 一切危害国家主权、领土完整和安全,分裂国家、颠覆人民民主专政的政权和推翻社会主义制度,破坏社会秩序和经济秩序,侵犯国有财产或者劳动群众集体所有的财产,侵犯公民私人所有的财产,侵犯公民的人身权利、民主权利和其他权利,以及其他危害社会的行为,依照法律应当受刑罚处罚的,都是犯罪,但是情节显著轻微危害不大的,不认为是犯罪。'"
    system_prompt += "5. <output>应该是对指令的适当且真实的回应,不能只回复答应或拒绝请求。如果需要额外信息才能回复时,请努力预测用户意图并尝试回复。<output>的内容应少于" + str(random.randint(128, 512)) + "字。\n\n"
    system_prompt += "请给出满足条件的20条JSON格式数据:\n"

    return system_prompt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg_path', default='../config.yaml', type=str)
    parser.add_argument('--save_path', default='./output.json', type=str)
    args = parser.parse_args()

    with open(args.cfg_path, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)

    openai.api_key = cfg['API_KEY']
    openai.api_base = cfg['API_BASE_URL']

    # Number of API calls to make (each prompt asks for 20 JSON-formatted items).
    # TODO: write the output incrementally (or stream it), otherwise a mid-run
    # failure loses everything.
    MAX_EPOCHS = 1
    with open(args.save_path, 'w', encoding='utf-8') as output_file:
        for k in range(MAX_EPOCHS):
            response = openai.ChatCompletion.create(
                # `gpt-3.5-turbo` is used here, while Stanford-Alpaca uses `text-davinci-003`
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "user", "content": return_random_prompt()},
                ]
            )
            output_file.write(response["choices"][0]["message"]["content"] + '\n')
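Both generation scripts dump the model's raw reply to output.json, so the file is not guaranteed to be valid JSON. A hedged post-processing sketch (an assumption, not part of the commit) that keeps only the lines that actually parse:

import json


def load_valid_records(path):
    # Keep only the lines of the raw dump that parse as JSON.
    records = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip().rstrip(',')
            if not line:
                continue
            try:
                records.append(json.loads(line))
            except json.JSONDecodeError:
                # Skip numbering, prose, or half-written lines.
                continue
    return records

# Example: records = load_valid_records('./output.json')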
@@ -1,63 +0,0 @@
from transformers import LlamaTokenizer
from sentencepiece import sentencepiece_model_pb2 as model
import argparse
import os


if __name__ == '__main__':
    # Load arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--load_path', default='../src/models/base_model/chinese_llama_7b/tokenizer_chinese.model', type=str)
    parser.add_argument('--save_dir', default='../src/models/base_model/save_chinese', type=str)
    parser.add_argument('--voc_path', default='../data/vocabulary/legal_vocab_processed.txt', type=str)
    args = parser.parse_args()

    LOAD_PATH = args.load_path
    SAVE_DIR = args.save_dir
    VOC_PATH = args.voc_path

    # Load the pre-trained llama sentencepiece model
    llama_spm = model.ModelProto()
    with open(LOAD_PATH, "rb") as f:
        llama_spm.ParseFromString(f.read())

    # Show size of llama's vocabulary
    llama_spm_tokens_set = set(p.piece for p in llama_spm.pieces)
    print(f"Size of initial llama's vocabulary: {len(llama_spm_tokens_set)}")

    # Load the custom legal vocabulary and append any pieces not already present
    with open(VOC_PATH, "r", encoding='utf-8') as f:
        new_tokens = f.read().split("\n")
    for token in new_tokens:
        if token and token not in llama_spm_tokens_set:
            new_token = model.ModelProto().SentencePiece()
            new_token.piece = token
            new_token.score = 0
            llama_spm.pieces.append(new_token)
            llama_spm_tokens_set.add(token)  # avoid adding duplicates from the vocab file
    print(f"Size of merged llama's vocabulary: {len(llama_spm.pieces)}")

    # Save the merged sentencepiece model, a plain-text vocab, and a HF tokenizer
    os.makedirs(SAVE_DIR, exist_ok=True)
    SAVE_MODEL_PATH = os.path.join(SAVE_DIR, 'tokenizer.model')
    SAVE_VOCAB_PATH = os.path.join(SAVE_DIR, 'tokenizer.vocab')
    with open(SAVE_MODEL_PATH, 'wb') as f:
        f.write(llama_spm.SerializeToString())
    with open(SAVE_VOCAB_PATH, 'w', encoding='utf-8') as f:
        f.writelines([f'{token.piece} {token.score}\n' for token in llama_spm.pieces])
    tokenizer = LlamaTokenizer(SAVE_MODEL_PATH)
    tokenizer.save_pretrained(SAVE_DIR)
    print(f'New llama tokenizer and spm have been saved to {SAVE_DIR}')

    # Test: compare the old and new tokenizers on a legal text snippet
    llama_tokenizer_old = LlamaTokenizer.from_pretrained(LOAD_PATH)
    llama_tokenizer_new = LlamaTokenizer.from_pretrained(SAVE_DIR)
    text = '''登记错误赔偿责任登记等手续登记等手续生效登记机构和登记办法登记机构赔偿后登记机构应当提供登记收费问题'''

    print(f'Size of old vocabulary: {llama_tokenizer_old.vocab_size}')
    print(f'Size of new vocabulary: {llama_tokenizer_new.vocab_size}')
    print('All special tokens and ids in new llama:')
    print(llama_tokenizer_new.all_special_tokens)
    print(llama_tokenizer_new.all_special_ids)
    print(llama_tokenizer_new.special_tokens_map)

    print(f'Text:\n{text}')
    print(f'Tokenized by LLaMA tokenizer:\n {llama_tokenizer_old.tokenize(text)}')
    print(f'Tokenized by NEW LLaMA tokenizer:\n {llama_tokenizer_new.tokenize(text)}')
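A hedged sanity-check sketch (an assumed helper, not part of the commit): after merging, every term from the legal vocabulary file should appear as a whole piece in the new tokenizer's vocabulary, so checking membership gives a quick coverage report.

from transformers import LlamaTokenizer


def coverage_report(tokenizer_dir, vocab_path, show=10):
    # Report how many custom terms ended up as whole pieces in the merged vocab.
    tok = LlamaTokenizer.from_pretrained(tokenizer_dir)
    vocab = tok.get_vocab()
    with open(vocab_path, encoding='utf-8') as f:
        terms = [t.strip() for t in f if t.strip()]
    missing = [t for t in terms if t not in vocab]
    print(f'{len(terms) - len(missing)}/{len(terms)} terms are in the merged vocabulary')
    for t in missing[:show]:
        print('still split:', tok.tokenize(t))

# Example, using the script's default paths:
# coverage_report('../src/models/base_model/save_chinese',
#                 '../data/vocabulary/legal_vocab_processed.txt')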