### encode the input paragraph to summarize
tokenized_text = tokenizer(
    str(text_input),
    truncation=True,  # truncate overly long inputs to the model's maximum length
    return_attention_mask=True,
    return_tensors="pt",
)
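
The tokenizer returns a dict-like `BatchEncoding`; a quick way to inspect the two tensors the next step consumes:

### the encoding is dict-like: both tensors below feed into generate()
print(tokenized_text["input_ids"].shape)       # (1, sequence_length)
print(tokenized_text["attention_mask"].shape)  # (1, sequence_length)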

### pass the tokenized paragraph to the model
generated_token = model.generate(
    input_ids=tokenized_text["input_ids"],
    attention_mask=tokenized_text["attention_mask"],
    max_length=256,  # upper bound on the summary length in tokens
    use_cache=True,  # reuse past key/values to speed up decoding
)
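
If the default greedy decoding produces repetitive or abrupt summaries, `generate` also accepts beam-search settings. A minimal sketch; the specific values are illustrative, not from the original:

### optional: beam search usually yields better summaries than greedy decoding
generated_token = model.generate(
    input_ids=tokenized_text["input_ids"],
    attention_mask=tokenized_text["attention_mask"],
    max_length=256,
    min_length=32,           # illustrative lower bound on summary length
    num_beams=4,             # explore 4 candidate sequences in parallel
    no_repeat_ngram_size=3,  # avoid repeating any 3-gram in the output
    early_stopping=True,     # stop once all beams reach end-of-sequence
)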

### decode the summarized paragraph tokens back into text
summarized_paragraph = [
    tokenizer.decode(token_ids=ids, skip_special_tokens=True)
    for ids in generated_token
]

`summarized_paragraph` now holds the final result: one summary string per generated sequence.
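
For reference, here is a minimal, self-contained sketch of the same pipeline. The checkpoint name `t5-small` and the `summarize:` task prefix are assumptions for illustration (T5-style models expect a prefix; BART-style models do not), not choices made by the original:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

### assumed checkpoint for illustration; any seq2seq summarization model works
tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

text_input = "Long paragraph to summarize ..."

### encode (the "summarize: " prefix is a T5-specific assumption)
tokenized_text = tokenizer(
    "summarize: " + str(text_input),
    truncation=True,
    return_attention_mask=True,
    return_tensors="pt",
)

### generate and decode, as in the steps above
generated_token = model.generate(
    input_ids=tokenized_text["input_ids"],
    attention_mask=tokenized_text["attention_mask"],
    max_length=256,
    use_cache=True,
)
summarized_paragraph = [
    tokenizer.decode(token_ids=ids, skip_special_tokens=True)
    for ids in generated_token
]
print(summarized_paragraph[0])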