[English Text Classification in Practice] Part 6: Models, Training, Evaluation, and Testing

Posted: 2022-05-31 05:28:17


· For the series table of contents, see: [English Text Classification in Practice] Part 1: Project Overview

· Download the project resources: 神经网络实现英文文本分类.zip (PyTorch)

[1] Writing the Models

1. TextRNN

Following the TextRNN model proposed in the paper "Recurrent Neural Network for Text Classification with Multi-Task Learning", we implement it as follows:

import numpy as np
import torch
import torch.nn as nn


class Config(object):
    """Configuration parameters."""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextRNN'
        self.train_path = dataset + '/data/train.csv'                    # training set
        self.dev_path = dataset + '/data/dev.csv'                        # validation set
        self.test_path = dataset + '/data/test.csv'                      # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # class names
        self.vocab_path = dataset + '/data/vocab.pkl'                    # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # saved model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32')) \
            if embedding != 'random' else None                           # pre-trained word vectors
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.dropout = 0.5               # dropout rate (has no effect when num_layers == 1)
        self.require_improvement = 1000  # stop early if no improvement for 1000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0                 # vocabulary size, assigned at runtime
        self.num_epochs = 10             # number of epochs
        self.batch_size = 128            # mini-batch size
        self.pad_size = 14               # sequence length (pad short texts, truncate long ones)
        self.learning_rate = 1e-3        # learning rate
        self.embed = self.embedding_pretrained.size(1) \
            if self.embedding_pretrained is not None else 300  # embedding dim, matches pre-trained vectors if used
        self.hidden_size = 128           # LSTM hidden size
        self.num_layers = 2              # number of LSTM layers


'''Recurrent Neural Network for Text Classification with Multi-Task Learning'''
'''
Shapes (example with seq_len = 32):
1. embedding output: [batch_size, seq_len, embed] = [128, 32, 300].
2. lstm output: [batch_size, seq_len, hidden_size * 2] = [128, 32, 256];
   the 32 positions are now LSTM time steps rather than raw tokens.
3. out[:, -1, :]: [batch_size, hidden_size * 2] = [128, 256],
   the hidden state at the last time step.
Notes:
1. The number of LSTM layers does not affect the output shape.
2. A bidirectional LSTM doubles the output width, i.e. hidden_size * 2.
'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.fc = nn.Linear(config.hidden_size * 2, config.num_classes)

    def forward(self, x):
        x, _ = x                      # x is a (token_ids, seq_len) tuple
        out = self.embedding(x)       # [batch_size, seq_len, embed] = [128, 32, 300]
        out, _ = self.lstm(out)       # [batch_size, seq_len, hidden_size * 2] = [128, 32, 256]
        out = self.fc(out[:, -1, :])  # last time step -> [batch_size, num_classes]
        return out
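As a quick sanity check, the snippet below runs a dummy forward pass through this Model. It is only a sketch: types.SimpleNamespace stands in for a real Config, and every value in it is a hypothetical stand-in.

import types
import torch

cfg = types.SimpleNamespace(
    embedding_pretrained=None, n_vocab=5000, embed=300,
    hidden_size=128, num_layers=2, dropout=0.5, num_classes=5)
model = Model(cfg)                                   # the TextRNN Model defined above
tokens = torch.randint(0, cfg.n_vocab - 1, (4, 14))  # [batch_size, pad_size]
seq_len = torch.full((4,), 14)                       # second tuple element, unused by this forward
logits = model((tokens, seq_len))
print(logits.shape)                                  # torch.Size([4, 5])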

2. DPCNN

Following the DPCNN model proposed in the paper "Deep Pyramid Convolutional Neural Networks for Text Categorization", we implement it as follows:

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class Config(object):
    """Configuration parameters."""
    def __init__(self, dataset, embedding):
        self.model_name = 'DPCNN'
        self.train_path = dataset + '/data/train.csv'                    # training set
        self.dev_path = dataset + '/data/dev.csv'                        # validation set
        self.test_path = dataset + '/data/test.csv'                      # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # class names
        self.vocab_path = dataset + '/data/vocab.pkl'                    # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # saved model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32')) \
            if embedding != 'random' else None                           # pre-trained word vectors
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.dropout = 0.5               # dropout rate
        self.require_improvement = 1000  # stop early if no improvement for 1000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0                 # vocabulary size, assigned at runtime
        self.num_epochs = 20             # number of epochs
        self.batch_size = 128            # mini-batch size
        self.pad_size = 14               # sequence length (pad short texts, truncate long ones)
        self.learning_rate = 1e-3        # learning rate
        self.embed = self.embedding_pretrained.size(1) \
            if self.embedding_pretrained is not None else 300  # embedding dim
        self.num_filters = 250           # number of convolution filters (channels)


'''Deep Pyramid Convolutional Neural Networks for Text Categorization'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.conv_region = nn.Conv2d(1, config.num_filters, (3, config.embed), stride=1)
        self.conv = nn.Conv2d(config.num_filters, config.num_filters, (3, 1), stride=1)
        self.max_pool = nn.MaxPool2d(kernel_size=(3, 1), stride=2)
        self.padding1 = nn.ZeroPad2d((0, 0, 1, 1))  # pad top and bottom
        self.padding2 = nn.ZeroPad2d((0, 0, 0, 1))  # pad bottom only
        self.relu = nn.ReLU()
        self.fc = nn.Linear(config.num_filters, config.num_classes)

    def forward(self, x):
        x = x[0]
        x = self.embedding(x)    # [batch_size, seq_len, embed]
        x = x.unsqueeze(1)       # [batch_size, 1, seq_len, embed]
        # Region embedding: a 3-gram convolution over the embeddings
        x = self.conv_region(x)  # [batch_size, 250, seq_len-3+1, 1]
        x = self.padding1(x)     # [batch_size, 250, seq_len, 1]
        x = self.relu(x)
        x = self.conv(x)         # [batch_size, 250, seq_len-3+1, 1]
        x = self.padding1(x)     # [batch_size, 250, seq_len, 1]
        x = self.relu(x)
        x = self.conv(x)         # [batch_size, 250, seq_len-3+1, 1]
        while x.size()[2] > 2:   # repeat blocks until the sequence length collapses
            x = self._block(x)
        x = x.squeeze()          # [batch_size, num_filters(250)]
        x = self.fc(x)
        return x

    def _block(self, x):
        x = self.padding2(x)
        px = self.max_pool(x)    # stride-2 pooling halves the sequence length
        x = self.padding1(px)
        x = F.relu(x)
        x = self.conv(x)
        x = self.padding1(x)
        x = F.relu(x)
        x = self.conv(x)
        # shortcut connection
        x = x + px
        return x
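The "pyramid" in DPCNN comes from _block: the stride-2 pooling halves the sequence length on each pass until the while loop exits. A minimal hedged check, again with SimpleNamespace and hypothetical values in place of a real Config:

import types
import torch

cfg = types.SimpleNamespace(embedding_pretrained=None, n_vocab=5000,
                            embed=300, num_filters=250, num_classes=5)
model = Model(cfg)                                   # the DPCNN Model defined above
tokens = torch.randint(0, cfg.n_vocab - 1, (4, 14))  # [batch_size, pad_size]
logits = model((tokens, None))                       # forward only uses x[0]
print(logits.shape)                                  # torch.Size([4, 5])
# With pad_size = 14 the internal length shrinks 12 -> 6 -> 3 -> 1 across blocks.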

3. TextCNN

Following the TextCNN model proposed in the paper "Convolutional Neural Networks for Sentence Classification", we implement it as follows:

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class Config(object):
    """Configuration parameters."""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextCNN'
        self.train_path = dataset + '/data/train.csv'                    # training set
        self.dev_path = dataset + '/data/dev.csv'                        # validation set
        self.test_path = dataset + '/data/test.csv'                      # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # class names
        self.vocab_path = dataset + '/data/vocab.pkl'                    # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # saved model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32')) \
            if embedding != 'random' else None                           # pre-trained word vectors
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.dropout = 0.5               # dropout rate
        self.require_improvement = 1000  # stop early if no improvement for 1000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0                 # vocabulary size, assigned at runtime
        self.num_epochs = 20             # number of epochs
        self.batch_size = 128            # mini-batch size
        self.pad_size = 14               # sequence length (pad short texts, truncate long ones)
        self.learning_rate = 1e-3        # learning rate
        self.embed = self.embedding_pretrained.size(1) \
            if self.embedding_pretrained is not None else 300  # embedding dim
        self.filter_sizes = (2, 3, 4)    # convolution kernel sizes
        self.num_filters = 256           # number of filters per kernel size (channels)


'''Convolutional Neural Networks for Sentence Classification'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, config.num_filters, (k, config.embed)) for k in config.filter_sizes])
        self.dropout = nn.Dropout(config.dropout)
        self.fc = nn.Linear(config.num_filters * len(config.filter_sizes), config.num_classes)

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3)             # [batch_size, num_filters, seq_len-k+1]
        x = F.max_pool1d(x, x.size(2)).squeeze(2)  # max over time -> [batch_size, num_filters]
        return x

    def forward(self, x):
        out = self.embedding(x[0])  # [batch_size, seq_len, embed]
        out = out.unsqueeze(1)      # [batch_size, 1, seq_len, embed]
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
        out = self.dropout(out)
        out = self.fc(out)
        return out
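To make conv_and_pool concrete, here is a short standalone trace for a single kernel size k = 3; the tensor sizes are hypothetical but consistent with the config above:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(4, 1, 14, 300)             # [batch, 1, pad_size, embed]
conv = nn.Conv2d(1, 256, (3, 300))         # one entry of self.convs
h = F.relu(conv(x)).squeeze(3)             # [4, 256, 12]: one activation per 3-gram
h = F.max_pool1d(h, h.size(2)).squeeze(2)  # [4, 256]: max over time
print(h.shape)                             # torch.Size([4, 256])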

All of the models above follow their respective papers. Their Config classes are nearly identical and contain the following parameters:

· model_name: the model name; when training, pass --model model_name;

· train_path / dev_path / test_path: paths to the training, validation, and test sets;

· class_list: the class names read from class.txt, mainly used to determine how many labels there are;

· vocab_path: path to the vocabulary file;

· save_path: where the trained model checkpoint is saved;

· embedding_pretrained: the pre-trained word vectors; with --embedding random nothing is loaded and the embeddings are randomly initialized, then updated by backpropagation during training (see the sketch after this list);

· device: whether to run on GPU or CPU;

· dropout: the dropout rate, which can be applied to many layers;

· require_improvement: training stops early if the validation loss has not improved for more than 1000 batches;

· num_classes: the number of classes;

· num_epochs: the number of training epochs;

· batch_size: the number of texts in one batch;

· pad_size: the length every text is processed to (pad short texts, truncate long ones);

· learning_rate: the learning rate.
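As referenced under embedding_pretrained, here is a hedged sketch of how such an embeddings file might be produced. The GloVe file name and the vocab.pkl layout (a word-to-index dict) are assumptions; only the "embeddings" key is fixed by what Config loads:

import pickle
import numpy as np

dataset = '.'                                                   # hypothetical project root
vocab = pickle.load(open(dataset + '/data/vocab.pkl', 'rb'))    # assumed: word -> index dict
embeddings = np.random.rand(len(vocab), 300).astype('float32')  # random rows as fallback
with open(dataset + '/data/glove.6B.300d.txt', encoding='utf-8') as f:  # assumed vector file
    for line in f:
        parts = line.rstrip().split(' ')
        if parts[0] in vocab:
            embeddings[vocab[parts[0]]] = np.asarray(parts[1:], dtype='float32')
# Saved under the key "embeddings", matching np.load(...)["embeddings"] in Config
np.savez_compressed(dataset + '/data/embedding_glove', embeddings=embeddings)

With such a file in place, training could then be pointed at it via --embedding embedding_glove.npz (np.savez_compressed appends the .npz suffix).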

[2] Training, Evaluation, and Testing Code

Training:

import time
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from torch.utils.tensorboard import SummaryWriter  # tensorboardX.SummaryWriter also works
from utils import get_time_dif  # project helper, assumed to live in utils.py


def train(config, model, train_iter, dev_iter, test_iter):
    start_time = time.time()
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
    # Optional exponential learning-rate decay, once per epoch: lr = gamma * lr
    # scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    total_batch = 0               # number of batches seen so far
    dev_best_loss = float('inf')
    last_improve = 0              # batch at which the dev loss last improved
    flag = False                  # whether training has gone too long without improving
    writer = SummaryWriter(log_dir=config.log_path + '/' + time.strftime('%m-%d_%H.%M', time.localtime()))
    for epoch in range(config.num_epochs):
        print('Epoch [{}/{}]'.format(epoch + 1, config.num_epochs))
        # scheduler.step()  # learning-rate decay
        for i, (trains, labels) in enumerate(train_iter):
            outputs = model(trains)
            model.zero_grad()
            loss = F.cross_entropy(outputs, labels)
            loss.backward()
            optimizer.step()
            if total_batch % 100 == 0:
                # report metrics on the training batch and the validation set
                true = labels.data.cpu()
                predic = torch.max(outputs.data, 1)[1].cpu()
                train_acc = metrics.accuracy_score(true, predic)
                dev_acc, dev_loss = evaluate(config, model, dev_iter)
                if dev_loss < dev_best_loss:
                    dev_best_loss = dev_loss
                    torch.save(model.state_dict(), config.save_path)
                    improve = '*'  # marks a new best dev loss (checkpoint saved)
                    last_improve = total_batch
                else:
                    improve = ''
                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>5.2}, Train Acc: {2:>6.2%}, Val Loss: {3:>5.2}, Val Acc: {4:>6.2%}, Time: {5} {6}'
                print(msg.format(total_batch, loss.item(), train_acc, dev_loss, dev_acc, time_dif, improve))
                writer.add_scalar("loss/train", loss.item(), total_batch)
                writer.add_scalar("loss/dev", dev_loss, total_batch)
                writer.add_scalar("acc/train", train_acc, total_batch)
                writer.add_scalar("acc/dev", dev_acc, total_batch)
                model.train()
            total_batch += 1
            if total_batch - last_improve > config.require_improvement:
                # dev loss has not improved for over 1000 batches; stop training
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break
        if flag:
            break
    writer.close()
    test(config, model, test_iter)

Evaluation:

def evaluate(config, model, data_iter, test=False):
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for texts, labels in data_iter:
            outputs = model(texts)
            loss = F.cross_entropy(outputs, labels)
            loss_total += loss
            labels = labels.data.cpu().numpy()
            predic = torch.max(outputs.data, 1)[1].cpu().numpy()
            labels_all = np.append(labels_all, labels)
            predict_all = np.append(predict_all, predic)
    acc = metrics.accuracy_score(labels_all, predict_all)
    if test:
        report = metrics.classification_report(labels_all, predict_all,
                                               target_names=config.class_list, digits=4)
        confusion = metrics.confusion_matrix(labels_all, predict_all)
        return acc, loss_total / len(data_iter), report, confusion
    return acc, loss_total / len(data_iter)

Testing:

def test(config, model, test_iter):
    # load the best checkpoint saved during training
    model.load_state_dict(torch.load(config.save_path))
    model.eval()
    start_time = time.time()
    test_acc, test_loss, test_report, test_confusion = evaluate(config, model, test_iter, test=True)
    msg = 'Test Loss: {0:>5.2}, Test Acc: {1:>6.2%}'
    print(msg.format(test_loss, test_acc))
    print("Precision, Recall and F1-Score...")
    print(test_report)
    print("Confusion Matrix...")
    print(test_confusion)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

Sample output (metrics are printed every 100 iterations; a trailing * marks a new best validation loss, at which point a checkpoint is saved):

Epoch [1/10]
Iter:      0, Train Loss:   2.1, Train Acc: 12.50%, Val Loss:   2.1, Val Acc: 15.15%, Time: 0:00:04 *
Iter:    100, Train Loss:   0.9, Train Acc: 69.53%, Val Loss:  0.99, Val Acc: 65.16%, Time: 0:00:06 *
Iter:    200, Train Loss:   0.9, Train Acc: 68.75%, Val Loss:  0.86, Val Acc: 70.27%, Time: 0:00:08 *

[3] How to Run the Code

There are two main command-line arguments:

· model: the model name;

· embedding: the name of the pre-trained word-vector file, or random.

Pass both arguments on the command line when running the project's run.py file.
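For example (assuming run.py wires these flags up with argparse, which is not shown in this post):

python run.py --model TextRNN --embedding random

To train with pre-trained vectors instead, pass the embedding file name, e.g. python run.py --model TextCNN --embedding embedding_glove.npz (a hypothetical file name).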
