此模型仅为mt0-xl的lora adapter,需要下载mt0-xl
训练过程、适用的任务、使用方法详见 https://huggingface.co/yuyijiong/Randeng-T5-large-sentiment-analysis-Chinese

Usage

import os

# Must be set before torch initializes CUDA so that only GPU 0 is visible.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import peft
import torch
from peft import PeftConfig, PeftModel
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    GenerationConfig,
    HfArgumentParser,
)

# LoRA adapter published on the Hub; its config records the base model
# (mt0-xl) it must be applied to.
peft_model_id = 'yuyijiong/mt0-xl-sentiment-quadruple-lora-adapter'
peft_config = PeftConfig.from_pretrained(peft_model_id)

# Load the base seq2seq model in 8-bit to reduce GPU memory;
# device_map='auto' lets accelerate place the weights across devices.
model = AutoModelForSeq2SeqLM.from_pretrained(
    peft_config.base_model_name_or_path,
    load_in_8bit=True,
    device_map='auto'
)
tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)

# Apply the LoRA adapter on top of the base model and switch to eval mode.
model = PeftModel.from_pretrained(model, peft_model_id)
model.eval()

# FIX: `generation_config` was passed to model.generate() below but never
# defined anywhere in the snippet (NameError at runtime). Define it here
# with explicit, deterministic decoding settings.
generation_config = GenerationConfig(
    max_new_tokens=256,  # quadruple outputs can be long; default (20) truncates
    do_sample=False,     # deterministic greedy decoding for extraction tasks
)

text = '情感四元组(对象 | 观点 | 方面 | 极性)抽取任务(观点可以较长): [个头大、口感不错,就是个别坏了的或者有烂掉口子刻意用泥土封着,这样做不好。]'
# NOTE(review): .cuda(0) assumes the input embeddings sit on GPU 0, which
# holds under device_map='auto' with a single visible GPU — verify if more
# GPUs are exposed.
input_ids = tokenizer(text, return_tensors="pt", padding=True)['input_ids'].cuda(0)

# Inference only: no gradients, and autocast for mixed-precision speed.
with torch.no_grad():
    with torch.autocast('cuda'):
        output = model.generate(input_ids=input_ids, generation_config=generation_config)

output_str = tokenizer.batch_decode(output, skip_special_tokens=True)