Generate Cover Letter

The snippet below loads the fine-tuned model from the Hub and generates a cover letter from structured candidate details.
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned cover-letter model and its tokenizer from the Hub.
model_name = "ShashiVish/llama-7b-merged-int4-r512-cover-letter"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Move the model onto the GPU; generation below assumes CUDA is available.
model = model.to('cuda')
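The checkpoint name suggests 4-bit merged weights, but from_pretrained as written loads them at full precision, which for a 7B model can exceed the memory of smaller GPUs. A minimal lower-memory alternative, assuming bitsandbytes and accelerate are installed (the quantization config is an illustration, not taken from the model card):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Assumption: bitsandbytes is available; quantize the weights to 4-bit on load.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",  # requires accelerate; replaces the manual .to('cuda')
)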

# Candidate and job details used to fill the prompt template.
item = {'job_title': "Senior Java Developer",
        'preferred_qualification': "5+ years of Java, Spring Boot",
        'hiring_company_name': "Netflix",
        'user_name': "Emily Evans",
        'past_working_experience': "Java Developer at XYZ for 4 years",
        'current_working_experience': "Senior Java Developer at ABC for 1 year",
        'skillset': "Java, Spring Boot, Microservices, SQL, AWS",
        'qualification': "Master's in Computer Science"}

# Prompt in the "### Instruction / ### Input" style; the instruction wording is
# kept verbatim since it presumably matches the fine-tuning format.
prompt = f"""### Instruction:
You are a smart cover letter generator. Use following Input to generate Cover letter.

### Input:
Role: {item['job_title']}, Preferred Qualifications: {item['preferred_qualification']}, \
Hiring Company: {item['hiring_company_name']}, User Name: {item['user_name']}, \
Past Working Experience: {item['past_working_experience']}, \
Current Working Experience: {item['current_working_experience']}, \
Skillsets: {item['skillset']}, Qualifications: {item['qualification']}

### Cover Letter:
"""

# Tokenize the prompt and sample a completion on the GPU.
input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids.cuda()
outputs = model.generate(input_ids=input_ids, max_new_tokens=512,
                         do_sample=True, top_p=0.9, temperature=0.9)

# Decode and strip the echoed prompt so only the generated letter remains.
model_response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0][len(prompt):]

print(model_response)
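With do_sample=True and temperature=0.9, each run yields a different letter. For repeatable output you can fix the seed or switch to greedy decoding; both are standard transformers calls (a sketch, not part of the original snippet):

from transformers import set_seed

# Option 1: keep sampling but make it reproducible across runs.
set_seed(42)
outputs = model.generate(input_ids=input_ids, max_new_tokens=512,
                         do_sample=True, top_p=0.9, temperature=0.9)

# Option 2: greedy decoding, deterministic by construction.
outputs = model.generate(input_ids=input_ids, max_new_tokens=512, do_sample=False)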