
This model uses task classification, and the conversation is between the USER and the Answer (AI).

NOTE ⚠️

The JAX/Flax version of the model is available for both training and use.

Using the Model with Hugging Face Transformers

Examples 🚀

The prompting format is:

</s><|prompter|> TEXT </s><|ai|>
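
For example, a user message can be wrapped in this template before generation (the message below is only a placeholder):

# Wrap a user message in the prompting format shown above.
user_message = "What is the capital of France?"  # hypothetical example
prompt = f"</s><|prompter|> {user_message} </s><|ai|>"
print(prompt)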

For byte-by-byte generation, you can use this code:

# It's recommended to use a pipeline.
# Make sure that you have sentencepiece, bitsandbytes, and accelerate installed.
from transformers import LlamaTokenizer, LlamaForCausalLM, pipeline
import torch
from IPython.display import clear_output
import textwrap

tokenizer = LlamaTokenizer.from_pretrained("erfanzar/LGeM-13B-MT")
model = LlamaForCausalLM.from_pretrained(
    'erfanzar/LGeM-13B-MT',
    load_in_8bit=True, 
    device_map='auto',
    torch_dtype=torch.float16
)

def generator(input_text, pipe_line, max_number=256, do_print=False, args_a=False):
    # Wrap long lines so the streamed output stays readable.
    verify_text = lambda txt: '\n'.join(textwrap.fill(line, width=140) for line in txt.split('\n'))

    # Optionally wrap the raw input in the prompting format.
    if not input_text.startswith('<|prompter|>') and args_a:
        input_text = f'</s><|prompter|> {input_text}</s><|ai|>'
    for i in range(max_number):
        previous_text = input_text
        with torch.no_grad():
            output = pipe_line(input_text)
        input_text = output[0]['generated_text']
        if do_print:
            clear_output(wait=True)
            print(verify_text(input_text))
        # Stop once generation hits an end/prompter token or the text no longer changes.
        if (input_text.endswith('</s>') and i > 6) or previous_text == input_text or (input_text.endswith('<|prompter|>') and i > 6):
            break
        yield verify_text(input_text)

And use it like this:


pipe_line = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    temperature=0.8,
    top_p=0.95,
    max_new_tokens=4,
    output_scores=True
)
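
Keeping max_new_tokens small means each pipeline call only extends the text by a few tokens, so the generator above streams output in small steps. A minimal usage sketch (the prompt text is just a placeholder):

# Stream a response with the generator defined above; the prompt is a hypothetical example.
for partial_text in generator("Explain what a neural network is", pipe_line, max_number=256, do_print=True, args_a=True):
    pass  # do_print=True already prints the growing text at each step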

Generate method to get the result token by token:


def generate(model_, input_ids_, tokenizer_, max_length: int = 256, temperature: float = 1, eos_token_id: int = 2):
    with torch.no_grad():
        # Length of the decoded prompt, so only newly generated text gets printed.
        prompt_length = len(tokenizer_.decode(input_ids_[0], skip_special_tokens=True))
        for _ in range(max_length):
            out = model_(
                input_ids=input_ids_,
                return_dict=True,
            )
            # Sample the next token from the temperature-scaled distribution.
            probs = torch.nn.functional.softmax(out.logits[:, -1, :] / temperature, dim=-1)
            next_token = torch.multinomial(probs, 1)
            input_ids_ = torch.cat([input_ids_, next_token], -1)
            clear_output(wait=True)
            print(f"\r{tokenizer_.decode(input_ids_[0], skip_special_tokens=True)[prompt_length:]}", end='')
            if next_token[0].item() == eos_token_id:
                break
            yield tokenizer_.decode(next_token[0], skip_special_tokens=True)
    return f"{tokenizer_.decode(input_ids_[0], skip_special_tokens=True)[prompt_length:]}"
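
A usage sketch for the generate method above, reusing the model and tokenizer loaded earlier (the prompt text is only a placeholder):

# Tokenize a prompt in the expected format and stream tokens from generate().
prompt = "</s><|prompter|> Write a short poem about the sea </s><|ai|>"  # hypothetical example
input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(model.device)
for token_text in generate(model, input_ids, tokenizer, max_length=256, temperature=0.8):
    pass  # the function already prints the running output; token_text holds each new piece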

Result

import socket
import time

def check_internet_connection():
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("www.google.com", 80))
        print("Internet connection is active.")
    except:
        print("Internet connection is not active.")

if __name__ == "__main__":
    check_internet_connection()

Using the Model in OST

LGeM 🚀

# PyTorch
from modules import LGeMForCausalLM
# JAX
from modules import FlaxLGeMForCausalLM

To launch training, run:

python3 LGeM-train.py
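
A minimal loading sketch using these classes, assuming they follow the usual Hugging Face from_pretrained convention (this method name is an assumption; check the OST repository for the exact API):

# PyTorch path; from_pretrained is assumed here, not confirmed by this card.
from modules import LGeMForCausalLM

model = LGeMForCausalLM.from_pretrained('erfanzar/LGeM-13B-MT')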