This model is specialized for question answering over a given context in Vietnamese. How to use (the snippet below requires `torch`, `transformers`, `peft`, and `axolotl`):

import torch
import os
from transformers import LlamaForCausalLM, LlamaTokenizer, LlamaConfig
from transformers import GenerationConfig, TextStreamer
from peft import PeftModel
from axolotl.prompters import AlpacaPrompter, PromptStyle

### Load model
# Load in bfloat16 and place the whole model on a single GPU
# (taken from the CUDA_DEVICE environment variable, defaulting to GPU 0).
torch_dtype = torch.bfloat16
device_map = {"": int(os.environ.get("CUDA_DEVICE") or 0)}

model_id = "nguyenthanhdo/noprob_model"

tokenizer = LlamaTokenizer.from_pretrained(model_id)
model = LlamaForCausalLM.from_pretrained(
    model_id,
    config=LlamaConfig.from_pretrained(model_id),
    device_map=device_map,
    torch_dtype=torch_dtype
)

### Build prompt
prompter = AlpacaPrompter(prompt_style=PromptStyle.INSTRUCT.value)
# instruction = "Provide short and concise answer. The answer should be straight and only provides explanation when needed." # Another instruction to test
instruction = 'You are an AI assistant. Provide a detailed answer so user don’t need to search outside to understand the answer.'
question = input("Question: ")
context = input("Context: ")
# Vietnamese template: "Based on the article below, answer the question below:"
qa_input = f"""Dựa vào bài viết dưới đây, trả lời câu hỏi phía dưới:\n{context}\n\nCâu hỏi: {question}"""
prompt = next(prompter.build_prompt(instruction=instruction, input=qa_input, output=""))
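# `prompt` now follows the Alpaca instruct format produced by axolotl's
# AlpacaPrompter: an instruction section, the context/question pair as the
# input section, and an empty response slot left for the model to complete.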

### Generate answer
input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"].to(model.device)
model.eval()
with torch.no_grad():
    generation_config = GenerationConfig(
        repetition_penalty=1.13,
        max_new_tokens=512,  # maximum length of the generated answer; adjust as needed
        temperature=0.2,
        top_p=0.95,
        top_k=20,
        # Llama tokenizers may not define a pad token; fall back to EOS if needed
        pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id,
        do_sample=True,
        use_cache=True,
        return_dict_in_generate=True,
        output_attentions=False,
        output_hidden_states=False,
        output_scores=False,
    )
    streamer = TextStreamer(tokenizer, skip_prompt=True)
    generated = model.generate(
        inputs=input_ids,
        generation_config=generation_config,
        streamer=streamer,
    )
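
Since `return_dict_in_generate=True`, `model.generate` returns a dict-like output whose `sequences` field holds the full token ids (prompt followed by the answer). A minimal sketch for recovering the answer as a string after streaming:

### Decode the answer (slice the prompt tokens off the front)
answer_ids = generated.sequences[0][input_ids.shape[-1]:]
answer = tokenizer.decode(answer_ids, skip_special_tokens=True)
print(answer)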