text = "Context: سعي; Char: س; Pos: 0"
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("pain/text2svg_summarization-2-epochs-28-step-367000")
inputs = tokenizer(text, return_tensors="pt").input_ids
from transformers import AutoModelForSeq2SeqLM
model = AutoModelForSeq2SeqLM.from_pretrained("pain/text2svg_summarization-2-epochs-28-step-367000")
outputs = model.generate(inputs, max_new_tokens=1024, do_sample=False)
tok_out = tokenizer.decode(outputs[0],skip_special_tokens=True)
tok_out
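
# A minimal follow-up sketch: assuming tok_out is complete SVG markup (an
# assumption based on the "text2svg" model name, not confirmed above), it can
# be written to a file for inspection in a browser. The filename and the
# startswith check are illustrative.
if tok_out.strip().startswith("<svg"):
    with open("generated_char.svg", "w", encoding="utf-8") as f:
        f.write(tok_out)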