Usage:
# Load the fine-tuned tokenizer and classification model from the Hugging Face Hub.
# NOTE: the original snippet omitted these imports; without them the names below
# are unresolved.
from transformers import AutoModelForSequenceClassification, BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained('ozooora/kzlbert-3poi')
model = AutoModelForSequenceClassification.from_pretrained('ozooora/kzlbert-3poi', return_dict=True)
@torch.no_grad()
def predict(text):
    """Classify *text* with the loaded model.

    The input is tokenized (truncated/padded to at most 419 tokens),
    passed through the model, and the logits are converted to class
    probabilities via softmax.

    Returns:
        A tuple ``(label_index, probabilities)`` where ``label_index`` is
        the argmax class as an int and ``probabilities`` is the full
        probability distribution as a plain Python list.
    """
    encoded = tokenizer(
        text,
        max_length=419,
        padding=True,
        truncation=True,
        return_tensors='pt',
    )
    logits = model(**encoded).logits
    probabilities = torch.nn.functional.softmax(logits, dim=1)
    label_index = torch.argmax(probabilities, dim=1).item()
    return label_index, probabilities[0].tolist()