```python
import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

class GoralConversation:
    """Accumulates a multi-turn dialogue and renders it in the model's prompt format."""

    def __init__(
        self,
        message_template=" <s> {role}\n{content} </s>\n",
        # System prompt (Russian): "You are Goral, a Russian-speaking automatic
        # assistant. You talk to people and help them."
        system_prompt="Ты — Горал, русскоязычный автоматический ассистент. Ты разговариваешь с людьми и помогаешь им.",
        start_token_id=1,
        bot_token_id=9225,
    ):
        self.message_template = message_template
        self.start_token_id = start_token_id
        self.bot_token_id = bot_token_id
        self.messages = [{"role": "system", "content": system_prompt}]

    def get_start_token_id(self):
        return self.start_token_id

    def get_bot_token_id(self):
        return self.bot_token_id

    def add_user_message(self, message):
        self.messages.append({"role": "user", "content": message})

    def add_bot_message(self, message):
        self.messages.append({"role": "bot", "content": message})

    def get_prompt(self, tokenizer):
        # Render every message with the template, then open the bot's turn so
        # that generation continues from the assistant role.
        final_text = ""
        for message in self.messages:
            final_text += self.message_template.format(**message)
        final_text += tokenizer.decode([self.start_token_id])
        final_text += " "
        final_text += tokenizer.decode([self.bot_token_id])
        return final_text.strip()

def generate(model, tokenizer, prompt, generation_config):
    data = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
    data = {k: v.to(model.device) for k, v in data.items()}
    output_ids = model.generate(**data, generation_config=generation_config)[0]
    # Strip the prompt tokens so only the newly generated completion is decoded.
    output_ids = output_ids[len(data["input_ids"][0]):]
    output = tokenizer.decode(output_ids, skip_special_tokens=True)
    return output.strip()

weights_path = "dim/llama2_7b_dolly_oasst1_chip2"
access_token = ""  # Hugging Face token (required to download the gated Llama 2 base model)

# Load the base model in 8-bit, then apply the LoRA adapter on top of it.
config = PeftConfig.from_pretrained(weights_path)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    load_in_8bit=True,
    torch_dtype=torch.float16,
    device_map={"": 0},
    token=access_token,
)
model = PeftModel.from_pretrained(
    model,
    weights_path,
    torch_dtype=torch.float16,
)
model.eval()

tokenizer = AutoTokenizer.from_pretrained(weights_path)
generation_config = GenerationConfig.from_pretrained(weights_path)
generation_config.do_sample = False  # greedy decoding

# Prompt (Russian): "Write an interesting blog post about a recent trip to Hawaii,
# covering the cultural experiences and the sights you absolutely must see."
inp = "Напишите интересный пост в блоге о недавней поездке на Гавайи, рассказывая о культурном опыте и достопримечательностях, которые обязательно нужно увидеть."
conversation = GoralConversation(
    start_token_id=1,
    bot_token_id=9225,
)
conversation.add_user_message(inp)
prompt = conversation.get_prompt(tokenizer)
output = generate(model, tokenizer, prompt, generation_config)
print(inp)
print(output)
```
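
For a multi-turn dialogue, append the model's reply back into the conversation before asking the next question. A minimal sketch, reusing the objects created above; the follow-up question is illustrative, not from the original card:

```python
# Multi-turn sketch (assumes model, tokenizer, generation_config and
# conversation from the example above; the follow-up question is made up).
conversation.add_bot_message(output)
# Russian: "Which beaches on Oahu are worth visiting?"
conversation.add_user_message("Какие пляжи стоит посетить на Оаху?")
prompt = conversation.get_prompt(tokenizer)
print(generate(model, tokenizer, prompt, generation_config))
```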
## Training procedure

The following `bitsandbytes` quantization config was used during training:
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
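
For reference, this list corresponds roughly to the following `BitsAndBytesConfig`; this is a sketch under the assumption that the training script used such an object (it may instead have passed `load_in_8bit=True` directly to `from_pretrained`), and the 4-bit fields are inactive here since `load_in_4bit` is False:

```python
import torch
from transformers import BitsAndBytesConfig

# Sketch: the quantization settings above expressed as a BitsAndBytesConfig.
# Only the 8-bit path is active; the bnb_4bit_* fields are defaults/unused.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)
```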
### Framework versions

- PEFT 0.4.0