
Model Card for odiagenAI-model-v1

License: CC BY-NC-SA 4.0

Model description

odiagenAI-model-v1 is based on Llama-7b and fine-tuned on a 171k-instruction Odia dataset. The instructions were translated from open-source resources, giving the model good Odia instruction-understanding and response-generation capabilities.
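
The instruction data appears to follow the Alpaca-style instruction/input/output schema that the prompt template in the code below assumes. A hypothetical record (placeholder text, not actual dataset content) looks like:

# Hypothetical Alpaca-style record; placeholders, not actual dataset content.
example_record = {
    "instruction": "<Odia instruction text>",
    "input": "<optional Odia context; may be empty>",
    "output": "<Odia reference response>",
}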

The code for Odia data generation and other detailed information can be found in our GitHub project repository: https://github.com/OdiaGenAI/GenerativeAI_and_LLM_Odia. This repository contains a low-rank (LoRA) adapter for LLaMA-7b fit on the translated Stanford Alpaca instruction data.

Training hyper-parameters

Parameter              Value
---------------------  ------------------------------
Batch size             128
Learning rate          3e-4
Epochs                 3
Cutoff length          256
Weight decay           0.001
Warmup rate            0.1
LR scheduler           linear
LoRA r                 16
LoRA target modules    q_proj, k_proj, v_proj, o_proj
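
For reference, here is a minimal sketch of how these values map onto a peft LoraConfig. Note that lora_alpha and lora_dropout are not reported in the table above, so the values shown for them are placeholders, not the actual training configuration:

from peft import LoraConfig

lora_config = LoraConfig(
    r=16,                                                     # "LoRA r" from the table
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # as reported above
    lora_alpha=16,      # assumption: not reported in the table
    lora_dropout=0.05,  # assumption: not reported in the table
    bias="none",
    task_type="CAUSAL_LM",
)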

The model can be loaded with LlamaForCausalLM from transformers, with the LoRA adapter applied via PeftModel from peft, as shown below.

import torch
import transformers
from peft import PeftModel

# LLaMA support requires a recent transformers install.
assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is not available in this transformers install.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig

BASE_MODEL = "decapoda-research/llama-7b-hf"
LORA_WEIGHTS = "OdiaGenAI/odiagenAI-model-v1"

tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)

# Prefer CUDA, then Apple Silicon (MPS), otherwise fall back to CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

try:
    if torch.backends.mps.is_available():
        device = "mps"
except Exception:  # older torch builds lack the mps backend
    pass

if device == "cuda":
    # Load the base model in fp16 and attach the OdiaGenAI LoRA adapter.
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        load_in_8bit=False,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    model = PeftModel.from_pretrained(
        model, LORA_WEIGHTS, torch_dtype=torch.float16, force_download=True
    )
elif device == "mps":
    # Apple Silicon: load in fp16 directly onto the MPS device.
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
else:
    # CPU fallback: full precision, with reduced peak memory at load time.
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
    )

def generate_prompt(instruction, input=None):
    # Alpaca-style prompt template used during fine-tuning; the model's
    # answer is expected after the "### Response:" marker.
    if input:
        return f"""### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"""
    else:
        return f"""### Instruction:\n{instruction}\n\n### Response:\n"""

if device != "cpu":
    model.half()  # run inference in fp16 on GPU/MPS
model.eval()
if torch.__version__ >= "2":
    model = torch.compile(model)  # optional speed-up on PyTorch 2.x

def evaluate(
    instruction,
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=128,
    **kwargs,
):
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    output = tokenizer.decode(generation_output.sequences[0])
    # Return only the text after the response marker.
    return output.split("### Response:")[1].strip()
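
A minimal usage sketch (the instruction string here is illustrative; real prompts would typically be written in Odia):

print(evaluate("Describe the state of Odisha.", max_new_tokens=256))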

Instructions for running it can be found at https://github.com/OdiaGenAI/GenerativeAI_and_LLM_Odia.

Licensing Information

This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.


Citation Information

If you find this repository helpful, please consider giving it a 👏 and citing:

@misc{OdiaGenAI,
  author = {Shantipriya Parida and Sambit Sekhar and Subhadarshi Panda and Soumendra Kumar Sahoo and Swateek Jena and Abhijeet Parida and Arghyadeep Sen and Satya Ranjan Dash and Deepak Kumar Pradhan},
  title = {OdiaGenAI: Generative AI and LLM Initiative for the Odia Language},
  year = {2023},
  publisher = {Hugging Face},
  journal = {Hugging Face repository},
  howpublished = {\url{https://huggingface.co/OdiaGenAI}},
}

Contributions