Abusive text classification
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    pipeline,
)

model_path = 'marianna13/xlm-roberta-fine-tuned-on-russian-abusive-language'

# Human-readable labels: 'неопасный текст' = "harmless text", 'опасный текст' = "dangerous text"
id2label = {
    0: 'неопасный текст',
    1: 'опасный текст'
}

label2id = {
    'неопасный текст': 0,
    'опасный текст': 1
}

# Load the config (with the label mapping), tokenizer and fine-tuned model
config = AutoConfig.from_pretrained(model_path, id2label=id2label, label2id=label2id)
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path, config=config)

text = "Прекрасный день."  # "A beautiful day."
pipe = pipeline('text-classification', model=model, tokenizer=tokenizer)
pipe(text)
# [{'label': 'неопасный текст', 'score': 0.9249424934387207}]
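
The same pipeline can back a simple moderation check. The sketch below is illustrative and assumes the pipe object created above; the is_abusive helper name and the 0.5 confidence threshold are arbitrary choices, not part of the model card.

# Flag a text when the model predicts the dangerous label with
# confidence above an arbitrary threshold (illustrative helper).
def is_abusive(text, threshold=0.5):
    result = pipe(text)[0]
    return result['label'] == 'опасный текст' and result['score'] >= threshold

is_abusive("Прекрасный день.")  # False: the example above scores ~0.92 as 'неопасный текст'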