T5.py
# coding: UTF-8
import torch
import torch.nn as nn
from transformers import AutoModel, AutoTokenizer
class Config(object):
"""配置参数"""
def __init__(self, dataset, pretrained_name_or_path=None):
self.model_name = 'T5'
self.train_path = dataset + '/data/train.txt' # 训练集
self.dev_path = dataset + '/data/dev.txt' # 验证集
self.test_path = dataset + '/data/test.txt' # 测试集
self.class_list = [x.strip() for x in open(
dataset + '/data/class.txt').readlines()] # 类别名单
self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt' # 模型训练结果
self.log_path = dataset + '/log/' + self.model_name
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 设备
self.require_improvement = 1000 # 若超过1000batch效果还没提升,则提前结束训练
self.multi_label = False
self.num_classes = len(self.class_list) # 类别数
self.num_epochs = 3 # epoch数
self.batch_size = 128 # mini-batch大小
self.pad_size = 32 # 每句话处理成的长度(短填长切)
self.learning_rate = 5e-5 # 学习率
self.encoder_path = './t5_pretrain' if not pretrained_name_or_path else pretrained_name_or_path
self.tokenizer = AutoTokenizer.from_pretrained(self.encoder_path)
self.hidden_size = 768


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        self.encoder = AutoModel.from_pretrained(config.encoder_path)
        self.tokenizer = config.tokenizer
        self.device = config.device
        for param in self.encoder.parameters():
            param.requires_grad = True                                   # fine-tune all T5 parameters
        self.fc = nn.Linear(config.hidden_size, config.num_classes)

    def forward(self, x):
        context = x[0]  # input token ids
        mask = x[2]     # attention mask, same shape as the input; padding positions are 0, e.g. [1, 1, 1, 1, 0, 0]
        # Feed a single pad token as the decoder input so the decoder emits one hidden state per example.
        # ref: https://discuss.huggingface.co/t/t5-classification-using-text2text/504/8
        decoder_input_ids = torch.tensor([self.tokenizer.pad_token_id]).unsqueeze(0).expand(context.size(0), -1).to(
            self.device)
        # outputs: [batch_size, 1, hidden_size], e.g. torch.Size([128, 1, 768])
        outputs = self.encoder(context, attention_mask=mask, decoder_input_ids=decoder_input_ids,
                               return_dict=True).last_hidden_state
        # token_ids = torch.argmax(outputs, dim=2)
        # tokens = self.tokenizer.batch_decode(token_ids)
        outputs = self.fc(outputs.squeeze(1))  # squeeze(1) keeps the batch dimension even when batch_size == 1
        return outputs
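

# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustration, not part of the training pipeline).
# It assumes a dataset directory laid out as Config expects (data/class.txt
# present) and a local T5 checkpoint at ./t5_pretrain; 'THUCNews' below is a
# hypothetical dataset name, so adjust both paths to your setup. It shows how
# the (token_ids, seq_len, attention_mask) tuple consumed by Model.forward
# could be built with the tokenizer.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    config = Config('THUCNews')                      # hypothetical dataset directory
    model = Model(config).to(config.device)

    sentences = ['example sentence one', 'example sentence two']
    encoded = config.tokenizer(sentences,
                               padding='max_length',
                               truncation=True,
                               max_length=config.pad_size,
                               return_tensors='pt')
    input_ids = encoded['input_ids'].to(config.device)
    attention_mask = encoded['attention_mask'].to(config.device)
    seq_len = attention_mask.sum(dim=1)              # placeholder for x[1]; forward() does not use it

    logits = model((input_ids, seq_len, attention_mask))
    print(logits.shape)                              # expected: [2, num_classes]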