MindSpore 25-Day Learning Check-in Camp, Day 18 | GPT2 Text Summarization with MindSpore

Contents
  Dataset
    Creating the Dataset
    Data Preprocessing
    Tokenizer
  Model Construction
    Building the GPT2ForSummarization Model
    Dynamic Learning Rate
  Model Training
  Model Inference
  Summary
  Check-in

Dataset
The experiment uses the NLPCC 2017 summarization dataset, which consists of news articles paired with their summaries, 50,000 samples in total.
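Each line of train_with_summ.txt is a JSON record. Judging from the fields accessed in the preprocessing code below ('article' and 'summarization'), a single sample looks roughly like the sketch here; the values are placeholders, not an actual record from the dataset.

# Hypothetical illustration of one line of train_with_summ.txt; the real records are
# Chinese news articles with their reference summaries.
sample = {
    "article": "……full text of a Chinese news article……",   # placeholder
    "summarization": "……its reference summary……",           # placeholder
}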
Creating the Dataset
from mindnlp.utils import http_get

# download dataset
url = 'https://download.mindspore.cn/toolkits/mindnlp/dataset/text_generation/nlpcc2017/train_with_summ.txt'
path = http_get(url, './')

from mindspore.dataset import TextFileDataset

# load dataset
dataset = TextFileDataset(str(path), shuffle=False)
dataset.get_dataset_size()
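The rest of the walkthrough uses train_dataset and test_dataset, which come from splitting the loaded dataset into a training part and a test part. The split itself is not shown in this excerpt; a minimal sketch, assuming a 90/10 split without shuffling:

# Split the full dataset into train/test subsets.
# The 0.9/0.1 ratio and randomize=False are assumptions, not taken from the original post.
train_dataset, test_dataset = dataset.split([0.9, 0.1], randomize=False)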
Data Preprocessing

Original data format:

article: [CLS] article_context [SEP]
summary: [CLS] summary_context [SEP]

Processed data format:

[CLS] article_context [SEP] summary_context [SEP]

import json
import numpy as np

# preprocess dataset
def process_dataset(dataset, tokenizer, batch_size=6, max_seq_len=1024, shuffle=False):
    def read_map(text):
        data = json.loads(text.tobytes())
        return np.array(data['article']), np.array(data['summarization'])

    def merge_and_pad(article, summary):
        # tokenization
        # pad to max_seq_length, only truncate the article
        tokenized = tokenizer(text=article, text_pair=summary,
                              padding='max_length', truncation='only_first',
                              max_length=max_seq_len)
        return tokenized['input_ids'], tokenized['input_ids']

    dataset = dataset.map(read_map, 'text', ['article', 'summary'])
    # change column names to input_ids and labels for the following training
    dataset = dataset.map(merge_and_pad, ['article', 'summary'], ['input_ids', 'labels'])

    dataset = dataset.batch(batch_size)
    if shuffle:
        dataset = dataset.shuffle(batch_size)

    return dataset
Tokenizer

Since GPT2 does not come with a Chinese tokenizer, BertTokenizer is used instead.
from mindnlp.transformers import BertTokenizer

# We use BertTokenizer for tokenizing chinese context.
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
len(tokenizer)

train_dataset = process_dataset(train_dataset, tokenizer, batch_size=4)
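To see the merged format described above ([CLS] article [SEP] summary [SEP]) produced by passing the summary as text_pair, the tokenizer can be tried on a toy article/summary pair. An illustrative check, not part of the original notebook:

# Encode a tiny article/summary pair and decode it back to inspect the merged layout.
demo = tokenizer(text="今天天气很好。", text_pair="天气好。")
print(tokenizer.decode(demo['input_ids']))
# roughly: [CLS] 今 天 天 气 很 好 。 [SEP] 天 气 好 。 [SEP]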
Model Construction

Building the GPT2ForSummarization Model
from mindspore import ops
from mindnlp.transformers import GPT2LMHeadModel

class GPT2ForSummarization(GPT2LMHeadModel):
    def construct(
        self,
        input_ids=None,
        attention_mask=None,
        labels=None,
    ):
        outputs = super().construct(input_ids=input_ids, attention_mask=attention_mask)
        shift_logits = outputs.logits[..., :-1, :]
        shift_labels = labels[..., 1:]
        # Flatten the tokens
        loss = ops.cross_entropy(shift_logits.view(-1, shift_logits.shape[-1]),
                                 shift_labels.view(-1), ignore_index=tokenizer.pad_token_id)
        return loss
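In the preprocessing step the labels column is simply a copy of input_ids, so the shift above is what turns it into next-token targets: logits at positions 0..n-2 are scored against labels at positions 1..n-1, and padded positions are excluded through ignore_index. A small standalone sketch of that shift with toy values (not from the experiment):

import numpy as np
import mindspore
from mindspore import ops, Tensor

pad_id = 0
logits = Tensor(np.random.randn(1, 4, 10), mindspore.float32)    # (batch, seq_len, vocab)
labels = Tensor(np.array([[5, 7, 2, pad_id]]), mindspore.int32)  # last position is padding

shift_logits = logits[..., :-1, :]   # predictions for positions 1..3
shift_labels = labels[..., 1:]       # targets at positions 1..3
loss = ops.cross_entropy(shift_logits.view(-1, shift_logits.shape[-1]),
                         shift_labels.view(-1), ignore_index=pad_id)
print(loss)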
Dynamic Learning Rate

from mindspore import ops
from mindspore.nn.learning_rate_schedule import LearningRateSchedule

class LinearWithWarmUp(LearningRateSchedule):
    """Warmup-decay learning rate."""
    def __init__(self, learning_rate, num_warmup_steps, num_training_steps):
        super().__init__()
        self.learning_rate = learning_rate
        self.num_warmup_steps = num_warmup_steps
        self.num_training_steps = num_training_steps

    def construct(self, global_step):
        if global_step < self.num_warmup_steps:
            return global_step / float(max(1, self.num_warmup_steps)) * self.learning_rate
        return ops.maximum(
            0.0,
            (self.num_training_steps - global_step) /
            (max(1, self.num_training_steps - self.num_warmup_steps))
        ) * self.learning_rate
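A quick way to sanity-check the schedule is to evaluate it at a few global steps and confirm that the learning rate climbs linearly during warmup and then decays linearly towards zero. An illustrative check with made-up step counts, not part of the original post:

import mindspore
from mindspore import Tensor

lr = LinearWithWarmUp(learning_rate=1.5e-4, num_warmup_steps=2000, num_training_steps=10000)
for step in (0, 1000, 2000, 6000, 10000):
    # pass the step as a Tensor, the way the optimizer would during training
    print(step, lr(Tensor(step, mindspore.float32)))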
Model Training

num_epochs = 1
warmup_steps = 2000
learning_rate = 1.5e-4

num_training_steps = num_epochs * train_dataset.get_dataset_size()

from mindspore import nn
from mindnlp.transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(vocab_size=len(tokenizer))
model = GPT2ForSummarization(config)

lr_scheduler = LinearWithWarmUp(learning_rate=learning_rate, num_warmup_steps=warmup_steps,
                                num_training_steps=num_training_steps)
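As an optional sanity check after building the model (not in the original post), the number of trainable parameters can be counted:

# Total number of trainable parameters in the freshly initialized GPT2 model.
num_params = sum(p.size for p in model.trainable_params())
print(f"trainable parameters: {num_params:,}")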
optimizer = nn.AdamWeightDecay(model.trainable_params(), learning_rate=lr_scheduler)

from mindnlp._legacy.engine import Trainer
from mindnlp._legacy.engine.callbacks import CheckpointCallback

ckpoint_cb = CheckpointCallback(save_path='checkpoint', ckpt_name='gpt2_summarization',
                                epochs=1, keep_checkpoint_max=2)

trainer = Trainer(network=model, train_dataset=train_dataset,
                  epochs=1, optimizer=optimizer, callbacks=ckpoint_cb)
trainer.set_amp(level='O1')  # enable mixed precision
trainer.run(tgt_columns='labels')
Model Inference

def process_test_dataset(dataset, tokenizer, batch_size=1, max_seq_len=1024, max_summary_len=100):
    def read_map(text):
        data = json.loads(text.tobytes())
        return np.array(data['article']), np.array(data['summarization'])

    def pad(article):
        # leave room for the generated summary by truncating the article
        tokenized = tokenizer(text=article, truncation=True, max_length=max_seq_len - max_summary_len)
        return tokenized['input_ids']

    dataset = dataset.map(read_map, 'text', ['article', 'summary'])
    dataset = dataset.map(pad, 'article', ['input_ids'])

    dataset = dataset.batch(batch_size)

    return dataset

test_dataset = process_test_dataset(test_dataset, tokenizer, batch_size=1)
model = GPT2LMHeadModel.from_pretrained('./checkpoint/gpt2_summarization_epoch_0.ckpt', config=config)
model.set_train(False)
model.config.eos_token_id = model.config.sep_token_id
i = 0
for (input_ids, raw_summary) in test_dataset.create_tuple_iterator():
    output_ids = model.generate(input_ids, max_new_tokens=50, num_beams=5, no_repeat_ngram_size=2)
    output_text = tokenizer.decode(output_ids[0].tolist())
    print(output_text)
    i += 1
    if i == 1:
        break
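Note that generate typically returns the input prompt followed by the newly generated tokens, so output_text above still begins with the article. To print only the generated summary, the prompt portion can be sliced off inside the loop; a sketch, relying on the batch size of 1 used above:

# Decode only the tokens produced after the input prompt.
prompt_len = input_ids.shape[1]
summary_ids = output_ids[0][prompt_len:]
print(tokenizer.decode(summary_ids.tolist()))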
Summary

This section walked through a text-summarization experiment built on GPT2LMHeadModel in MindSpore. The experiment uses the NLPCC 2017 summarization dataset, relies on BertTokenizer for Chinese tokenization (since GPT2 has no Chinese tokenizer of its own), and applies a warmup-decay dynamic learning rate to control how quickly the model converges.
Check-in