Initialize repo

Pengxiao Song
2023-04-27 21:42:17 +08:00
parent eaa15baf8b
commit a19b9e1476
7 changed files with 235 additions and 2 deletions


@@ -0,0 +1,59 @@
import argparse
import random

import openai
import yaml


def return_random_prompt():
    system_prompt = "你需要尽可能给出多样化的任务指令和对应的回答。我们将用于人工评估ChatGPT模型对指令的完成情况。要求:\n"

    # generate random topics
    system_prompt += "1. 主题多样化,涵盖法律诉讼的各个领域,例如:刑法、民法、行政法等。\n"

    # generate random tasks, joined with a Chinese enumeration comma
    task_list = ["开放式生成", "分类", "问答", "编辑", "摘要",
                 "写作", "翻译", "分析", "常识推理", "写信", "抽取", "推荐"]
    system_prompt += "2. 表述多样化,结合真实问题;指令类型多样化,例如:" + \
        "、".join(random.sample(task_list, 10)) + "等。\n"

    # other requirements
    system_prompt += "3. 如果遇到无法处理的指令(只靠文本无法回答),给出无法处理的回复。\n"
    system_prompt += "4. 除非特别要求,请使用中文,指令可以是命令句、疑问句、或其他合适的类型。\n"
    system_prompt += "5. 为指令生成一个适当且涉及真实情况的<input>,不应该只包含简单的占位符。<input>应提供实质性的内容,具有挑战性。字数不超过" + \
        str(random.randint(80, 120)) + "字。\n"
    system_prompt += "6. <output>应该是对指令的适当且真实的回应,不能只回复答应或拒绝请求。如果需要额外信息才能回复时,请努力预测用户意图并尝试回复。<output>的内容应少于" + \
        str(random.randint(128, 512)) + "字。\n\n"
    system_prompt += "请给出满足条件的20条JSON格式数据\n"
    return system_prompt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg_path', default='../config.yaml', type=str)
    parser.add_argument('--save_path', default='./output.json', type=str)
    args = parser.parse_args()

    with open(args.cfg_path, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    openai.api_key = cfg['API_KEY']
    openai.api_base = cfg['API_BASE_URL']

    output_file = open(args.save_path, 'w', encoding='utf-8')

    # number of prompts to send (each response should contain 20 JSON-formatted samples)
    # TODO: write results incrementally (streaming), otherwise a failure midway loses the whole run
    MAX_EPOCHS = 1
    for k in range(MAX_EPOCHS):
        response = openai.ChatCompletion.create(
            # here we use the `gpt-3.5-turbo` model, while Stanford-Alpaca uses `text-davinci-003`
            model="gpt-3.5-turbo",
            messages=[
                {"role": "user", "content": return_random_prompt()},
            ]
        )
        output_file.write(response["choices"][0]["message"]["content"] + '\n')
    output_file.close()
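
The TODO above flags that buffering all output until the end means a single failed API call can discard the whole run. Below is a minimal sketch of the incremental approach it hints at, reusing return_random_prompt() and the pre-1.0 openai.ChatCompletion API from this script; the retry/backoff policy and the append-mode output path are illustrative assumptions, not part of this commit.

import time

def generate_incrementally(save_path, max_epochs=10, max_retries=3):
    """Sketch: append each batch to disk as soon as it arrives, so a failure
    midway does not lose earlier batches."""
    for k in range(max_epochs):
        for attempt in range(max_retries):
            try:
                response = openai.ChatCompletion.create(
                    model="gpt-3.5-turbo",
                    messages=[{"role": "user", "content": return_random_prompt()}],
                )
                break
            except openai.error.OpenAIError:
                # back off briefly, then retry this batch
                time.sleep(2 ** attempt)
        else:
            continue  # give up on this batch after max_retries failed attempts
        # append mode + immediate close: partial runs keep everything written so far
        with open(save_path, 'a', encoding='utf-8') as f:
            f.write(response["choices"][0]["message"]["content"] + '\n')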


@@ -0,0 +1,63 @@
import argparse
import os

import sentencepiece as sp
from sentencepiece import sentencepiece_model_pb2 as model
from transformers import LlamaTokenizer

if __name__ == '__main__':
    # Load arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--load_path', default='../src/models/base_model/chinese_llama_7b/tokenizer_chinese.model', type=str)
    parser.add_argument('--save_dir', default='../src/models/base_model/save_chinese', type=str)
    parser.add_argument('--voc_path', default='../data/vocabulary/legal_vocab_processed.txt', type=str)
    args = parser.parse_args()
    LOAD_PATH = args.load_path
    SAVE_DIR = args.save_dir
    VOC_PATH = args.voc_path

    # Load pre-trained llama sentencepiece model
    llama_spm = model.ModelProto()
    with open(LOAD_PATH, "rb") as f:
        llama_spm.ParseFromString(f.read())

    # show size of llama's vocabulary
    llama_spm_tokens_set = set(p.piece for p in llama_spm.pieces)
    print(f"Size of initial llama's vocabulary: {len(llama_spm_tokens_set)}")

    # Load custom vocabulary and append tokens that are not already present
    with open(VOC_PATH, "r", encoding='utf-8') as f:
        new_tokens = f.read().split("\n")
    for token in new_tokens:
        # skip empty lines so no blank piece gets added to the vocabulary
        if token and token not in llama_spm_tokens_set:
            new_token = model.ModelProto().SentencePiece()
            new_token.piece = token
            new_token.score = 0
            llama_spm.pieces.append(new_token)
    print(f"Size of merged llama's vocabulary: {len(llama_spm.pieces)}")

    # save the merged sentencepiece model, its vocabulary, and a HuggingFace tokenizer
    os.makedirs(SAVE_DIR, exist_ok=True)
    SAVE_MODEL_PATH = os.path.join(SAVE_DIR, 'tokenizer.model')
    SAVE_VOCAB_PATH = os.path.join(SAVE_DIR, 'tokenizer.vocab')
    with open(SAVE_MODEL_PATH, 'wb') as f:
        f.write(llama_spm.SerializeToString())
    with open(SAVE_VOCAB_PATH, 'w', encoding='utf-8') as f:
        f.writelines([f'{token.piece} {token.score}\n' for token in llama_spm.pieces])
    tokenizer = LlamaTokenizer(SAVE_MODEL_PATH)
    tokenizer.save_pretrained(SAVE_DIR)
    print(f'New llama tokenizer and spm have been saved to {SAVE_DIR}')

    # test: compare tokenization of a legal text snippet with the old and new tokenizers
    llama_tokenizer_old = LlamaTokenizer(LOAD_PATH)
    llama_tokenizer_new = LlamaTokenizer.from_pretrained(SAVE_DIR)
    text = '''登记错误赔偿责任登记等手续登记等手续生效登记机构和登记办法登记机构赔偿后登记机构应当提供登记收费问题'''

    print(f'Size of old vocabulary: {llama_tokenizer_old.vocab_size}')
    print(f'Size of new vocabulary: {llama_tokenizer_new.vocab_size}')
    print('All special tokens and ids in new llama:')
    print(llama_tokenizer_new.all_special_tokens)
    print(llama_tokenizer_new.all_special_ids)
    print(llama_tokenizer_new.special_tokens_map)

    print(f'Text:\n{text}')
    print(f'Tokenized by LLaMA tokenizer:\n {llama_tokenizer_old.tokenize(text)}')
    print(f'Tokenized by NEW LLaMA tokenizer:\n {llama_tokenizer_new.tokenize(text)}')
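
Merging legal-domain tokens changes the vocabulary size, so the base LLaMA model's embedding matrix has to be resized to match the new tokenizer before any continued pre-training; this commit stops at the tokenizer itself. Below is a minimal sketch of that follow-up step using standard transformers calls, where the base-model directory and the '_resized' output location are assumptions for illustration, not paths from this repo.

import torch
from transformers import LlamaForCausalLM, LlamaTokenizer

# Illustrative paths: the merged tokenizer saved above and an assumed base LLaMA checkpoint dir.
TOKENIZER_DIR = '../src/models/base_model/save_chinese'
BASE_MODEL_DIR = '../src/models/base_model/chinese_llama_7b'

tokenizer = LlamaTokenizer.from_pretrained(TOKENIZER_DIR)
model = LlamaForCausalLM.from_pretrained(BASE_MODEL_DIR, torch_dtype=torch.float16)

# Grow the input/output embeddings to the merged vocabulary size; the new rows start
# randomly initialized and are meant to be learned during continued pre-training.
model.resize_token_embeddings(len(tokenizer))
print(f'Embedding rows after resize: {model.get_input_embeddings().weight.shape[0]}')

model.save_pretrained(BASE_MODEL_DIR + '_resized')      # assumed output location
tokenizer.save_pretrained(BASE_MODEL_DIR + '_resized')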