The Architecture and Its Components

    • Transformer

Transformer

The code below includes:

  1. A layer normalization example
  2. A residual connection example
  3. A self-attention example
  4. A multi-head attention example
# huggingface transformers
# https://www.bilibili.com/video/BV1At4y1W75x?spm_id_from=333.999.0.0
import copy
import math
from collections import namedtuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

Hypothesis = namedtuple('Hypothesis', ['value', 'score'])


def clones(module, n):
    # Produce n identical (deep-copied) layers, collected in a ModuleList.
    return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])


"""
Layer normalization of x (normalization makes x approximately follow a normal distribution).
"""
class LayerNorm(nn.Module):
    def __init__(self, feature, eps=1e-6):
        """
        :param feature: feature size of the self-attention input x
        :param eps: small constant added for numerical stability
        """
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(feature))   # learnable gain
        self.b_2 = nn.Parameter(torch.zeros(feature))  # learnable bias
        self.eps = eps

    def forward(self, x):
        # Normalize over the last (feature) dimension.
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
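A minimal sanity check of the class above (the shapes here are hypothetical, purely for illustration): after normalization, each feature vector should have roughly zero mean and unit standard deviation.

# Hypothetical shapes, for illustration only.
layer_norm = LayerNorm(512)
x = torch.randn(2, 10, 512)            # [batch, seq_len, d_model]
y = layer_norm(x)
print(y.mean(-1).abs().max().item())   # close to 0
print(y.std(-1).mean().item())         # close to 1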
"""
Residual connection example.
"""
class SublayerConnection(nn.Module):
    """
    This does more than the residual connection: it applies the residual
    and the LayerNorm together.
    """

    def __init__(self, size, dropout=0.1):
        super(SublayerConnection, self).__init__()
        # Step 1: LayerNorm (instantiated as a sub-module).
        self.layer_norm = LayerNorm(size)
        # Step 2: dropout.
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, sublayer):
        """
        :param x: the input to self-attention
        :param sublayer: the self-attention layer
        :return:
        """
        # Note: this is a post-norm variant -- LayerNorm is applied after the
        # residual addition, with dropout applied after the norm.
        return self.dropout(self.layer_norm(x + sublayer(x)))


class FeatEmbedding(nn.Module):
    def __init__(self, d_feat, d_model, dropout):
        super(FeatEmbedding, self).__init__()
        self.video_embeddings = nn.Sequential(
            LayerNorm(d_feat),
            nn.Dropout(dropout),
            nn.Linear(d_feat, d_model))

    def forward(self, x):
        return self.video_embeddings(x)


class TextEmbedding(nn.Module):
    def __init__(self, vocab_size, d_model):
        super(TextEmbedding, self).__init__()
        self.d_model = d_model
        self.embed = nn.Embedding(vocab_size, d_model)

    def forward(self, x):
        return self.embed(x) * math.sqrt(self.d_model)


class PositionalEncoding(nn.Module):
    def __init__(self, dim, dropout, max_len=5000):
        if dim % 2 != 0:
            raise ValueError("Cannot use sin/cos positional encoding with "
                             "odd dim (got dim={:d})".format(dim))
        pe = torch.zeros(max_len, dim)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *
                              -(math.log(10000.0) / dim)))
        pe[:, 0::2] = torch.sin(position.float() * div_term)
        pe[:, 1::2] = torch.cos(position.float() * div_term)
        # Store pe as [1, max_len, dim] so it broadcasts over batch-first inputs
        # ([batch, seq_len, dim]). The original stored [max_len, 1, dim]
        # (sequence-first), which would index by batch for the batch-first
        # tensors used elsewhere in this code.
        pe = pe.unsqueeze(0)
        super(PositionalEncoding, self).__init__()
        self.register_buffer('pe', pe)
        self.drop_out = nn.Dropout(p=dropout)
        self.dim = dim

    def forward(self, emb, step=None):
        # Note: TextEmbedding already scales by sqrt(d_model), so text
        # embeddings are effectively scaled twice here.
        emb = emb * math.sqrt(self.dim)
        if step is None:
            emb = emb + self.pe[:, :emb.size(1)]
        else:
            emb = emb + self.pe[:, step]
        emb = self.drop_out(emb)
        return emb
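For reference, the buffer built above is the fixed sinusoidal scheme from the original Transformer paper, with even and odd feature dimensions carrying sine and cosine respectively:

PE_{(pos,\,2i)} = \sin\!\left(pos / 10000^{2i/d_{model}}\right), \qquad
PE_{(pos,\,2i+1)} = \cos\!\left(pos / 10000^{2i/d_{model}}\right)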
"""
Self-attention implementation example.
"""
def self_attention(query, key, value, dropout=None, mask=None):
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    # Masking happens after Q.K^T and before the softmax.
    if mask is not None:
        # The original called mask.cuda() without assigning the result.
        mask = mask.cuda()
        scores = scores.masked_fill(mask == 0, -1e9)
    self_attn = F.softmax(scores, dim=-1)
    if dropout is not None:
        self_attn = dropout(self_attn)
    return torch.matmul(self_attn, value), self_attn
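The function above is exactly the scaled dot-product attention of "Attention Is All You Need":

\mathrm{Attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{QK^\top}{\sqrt{d_k}}\right)V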
"""
Multi-head attention implementation example.
"""
class MultiHeadAttention(nn.Module):
    def __init__(self, head, d_model, dropout=0.1):
        super(MultiHeadAttention, self).__init__()
        assert (d_model % head == 0)
        self.d_k = d_model // head
        self.head = head
        self.d_model = d_model
        self.linear_query = nn.Linear(d_model, d_model)
        self.linear_key = nn.Linear(d_model, d_model)
        self.linear_value = nn.Linear(d_model, d_model)
        self.linear_out = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(p=dropout)
        self.attn = None

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # The linear projections reshape query from [batch, frame_num, d_model]
            # to [batch, -1, head, d_k] and then swap dims 1 and 2 to get
            # [batch, head, -1, d_k], so the mask needs an extra dimension at
            # position 1 to match the dimensions used in self_attention.
            mask = mask.unsqueeze(1)
        n_batch = query.size(0)
        query = self.linear_query(query).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)  # [b, 8, 32, 64]
        key = self.linear_key(key).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)        # [b, 8, 28, 64]
        value = self.linear_value(value).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)  # [b, 8, 28, 64]
        x, self.attn = self_attention(query, key, value, dropout=self.dropout, mask=mask)
        # Back to three dimensions -- i.e. concatenate the heads.
        x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.head * self.d_k)
        return self.linear_out(x)


class PositionWiseFeedForward(nn.Module):
    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionWiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.dropout_1 = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x):
        inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
        output = self.dropout_2(self.w_2(inter))
        return output


class EncoderLayer(nn.Module):
    def __init__(self, size, attn, feed_forward, dropout=0.1):
        super(EncoderLayer, self).__init__()
        self.attn = attn
        self.feed_forward = feed_forward
        self.sublayer_connection = clones(SublayerConnection(size, dropout), 2)

    def forward(self, x, mask):
        x = self.sublayer_connection[0](x, lambda x: self.attn(x, x, x, mask))
        return self.sublayer_connection[1](x, self.feed_forward)


class EncoderLayerNoAttention(nn.Module):
    def __init__(self, size, attn, feed_forward, dropout=0.1):
        super(EncoderLayerNoAttention, self).__init__()
        self.attn = attn
        self.feed_forward = feed_forward
        self.sublayer_connection = clones(SublayerConnection(size, dropout), 2)

    def forward(self, x, mask):
        return self.sublayer_connection[1](x, self.feed_forward)


class DecoderLayer(nn.Module):
    def __init__(self, size, attn, feed_forward, sublayer_num, dropout=0.1):
        super(DecoderLayer, self).__init__()
        self.attn = attn
        self.feed_forward = feed_forward
        self.sublayer_connection = clones(SublayerConnection(size, dropout), sublayer_num)

    def forward(self, x, memory, src_mask, trg_mask, r2l_memory=None, r2l_trg_mask=None):
        # Masked self-attention over the target, then cross-attention over the
        # encoder memory; optionally a third cross-attention over the R2L memory.
        x = self.sublayer_connection[0](x, lambda x: self.attn(x, x, x, trg_mask))
        x = self.sublayer_connection[1](x, lambda x: self.attn(x, memory, memory, src_mask))
        if r2l_memory is not None:
            x = self.sublayer_connection[-2](x, lambda x: self.attn(x, r2l_memory, r2l_memory, r2l_trg_mask))
        return self.sublayer_connection[-1](x, self.feed_forward)


class Encoder(nn.Module):
    def __init__(self, n, encoder_layer):
        super(Encoder, self).__init__()
        self.encoder_layer = clones(encoder_layer, n)

    def forward(self, x, src_mask):
        for layer in self.encoder_layer:
            x = layer(x, src_mask)
        return x


class R2L_Decoder(nn.Module):
    def __init__(self, n, decoder_layer):
        super(R2L_Decoder, self).__init__()
        self.decoder_layer = clones(decoder_layer, n)

    def forward(self, x, memory, src_mask, r2l_trg_mask):
        for layer in self.decoder_layer:
            x = layer(x, memory, src_mask, r2l_trg_mask)
        return x


class L2R_Decoder(nn.Module):
    def __init__(self, n, decoder_layer):
        super(L2R_Decoder, self).__init__()
        self.decoder_layer = clones(decoder_layer, n)

    def forward(self, x, memory, src_mask, trg_mask, r2l_memory, r2l_trg_mask):
        for layer in self.decoder_layer:
            x = layer(x, memory, src_mask, trg_mask, r2l_memory, r2l_trg_mask)
        return x


def pad_mask(src, r2l_trg, trg, pad_idx):
    if isinstance(src, tuple):
        if len(src) == 4:
            src_image_mask = (src[0][:, :, 0] != pad_idx).unsqueeze(1)
            src_motion_mask = (src[1][:, :, 0] != pad_idx).unsqueeze(1)
            src_object_mask = (src[2][:, :, 0] != pad_idx).unsqueeze(1)
            src_rel_mask = (src[3][:, :, 0] != pad_idx).unsqueeze(1)
            enc_src_mask = (src_image_mask, src_motion_mask, src_object_mask, src_rel_mask)
            dec_src_mask_1 = src_image_mask & src_motion_mask
            dec_src_mask_2 = src_image_mask & src_motion_mask & src_object_mask & src_rel_mask
            dec_src_mask = (dec_src_mask_1, dec_src_mask_2)
            src_mask = (enc_src_mask, dec_src_mask)
        if len(src) == 3:
            src_image_mask = (src[0][:, :, 0] != pad_idx).unsqueeze(1)
            src_motion_mask = (src[1][:, :, 0] != pad_idx).unsqueeze(1)
            src_object_mask = (src[2][:, :, 0] != pad_idx).unsqueeze(1)
            enc_src_mask = (src_image_mask, src_motion_mask, src_object_mask)
            dec_src_mask = src_image_mask & src_motion_mask
            src_mask = (enc_src_mask, dec_src_mask)
        if len(src) == 2:
            src_image_mask = (src[0][:, :, 0] != pad_idx).unsqueeze(1)
            src_motion_mask = (src[1][:, :, 0] != pad_idx).unsqueeze(1)
            enc_src_mask = (src_image_mask, src_motion_mask)
            dec_src_mask = src_image_mask & src_motion_mask
            src_mask = (enc_src_mask, dec_src_mask)
    else:
        src_mask = (src[:, :, 0] != pad_idx).unsqueeze(1)
    if trg is not None:
        if isinstance(src_mask, tuple):
            trg_mask = (trg != pad_idx).unsqueeze(1) & subsequent_mask(trg.size(1)).type_as(src_image_mask.data)
            r2l_pad_mask = (r2l_trg != pad_idx).unsqueeze(1).type_as(src_image_mask.data)
            r2l_trg_mask = r2l_pad_mask & subsequent_mask(r2l_trg.size(1)).type_as(src_image_mask.data)
            return src_mask, r2l_pad_mask, r2l_trg_mask, trg_mask
        else:
            trg_mask = (trg != pad_idx).unsqueeze(1) & subsequent_mask(trg.size(1)).type_as(src_mask.data)
            r2l_pad_mask = (r2l_trg != pad_idx).unsqueeze(1).type_as(src_mask.data)
            r2l_trg_mask = r2l_pad_mask & subsequent_mask(r2l_trg.size(1)).type_as(src_mask.data)
            return src_mask, r2l_pad_mask, r2l_trg_mask, trg_mask  # src_mask[batch, 1, lens]  trg_mask[batch, 1, lens]
    else:
        return src_mask


def subsequent_mask(size):
    """Mask out subsequent positions."""
    attn_shape = (1, size, size)
    mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    return (torch.from_numpy(mask) == 0).cuda()


class Generator(nn.Module):
    def __init__(self, d_model, vocab_size):
        super(Generator, self).__init__()
        self.linear = nn.Linear(d_model, vocab_size)

    def forward(self, x):
        return F.log_softmax(self.linear(x), dim=-1)
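A quick look at the pattern subsequent_mask produces (recomputed on CPU here for display, since the function itself hard-codes .cuda()): position i is allowed to attend only to positions up to and including i.

# Illustration of the causal mask pattern for size=4.
size = 4
pattern = torch.from_numpy(np.triu(np.ones((1, size, size)), k=1).astype('uint8')) == 0
print(pattern.int())
# tensor([[[1, 0, 0, 0],
#          [1, 1, 0, 0],
#          [1, 1, 1, 0],
#          [1, 1, 1, 1]]], dtype=torch.int32)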
class ABDTransformer(nn.Module):
    def __init__(self, vocab, d_feat, d_model, d_ff, n_heads, n_layers, dropout, feature_mode,
                 device='cuda', n_heads_big=128):
        super(ABDTransformer, self).__init__()
        self.vocab = vocab
        self.device = device
        self.feature_mode = feature_mode
        c = copy.deepcopy
        attn = MultiHeadAttention(n_heads, d_model, dropout)
        attn_big = MultiHeadAttention(n_heads_big, d_model, dropout)
        feed_forward = PositionWiseFeedForward(d_model, d_ff)
        if feature_mode == 'one':
            self.src_embed = FeatEmbedding(d_feat, d_model, dropout)
        elif feature_mode == 'two':
            self.image_src_embed = FeatEmbedding(d_feat[0], d_model, dropout)
            self.motion_src_embed = FeatEmbedding(d_feat[1], d_model, dropout)
        elif feature_mode == 'three':
            self.image_src_embed = FeatEmbedding(d_feat[0], d_model, dropout)
            self.motion_src_embed = FeatEmbedding(d_feat[1], d_model, dropout)
            self.object_src_embed = FeatEmbedding(d_feat[2], d_model, dropout)
        elif feature_mode == 'four':
            self.image_src_embed = FeatEmbedding(d_feat[0], d_model, dropout)
            self.motion_src_embed = FeatEmbedding(d_feat[1], d_model, dropout)
            self.object_src_embed = FeatEmbedding(d_feat[2], d_model, dropout)
            self.rel_src_embed = FeatEmbedding(d_feat[3], d_model, dropout)
        self.trg_embed = TextEmbedding(vocab.n_vocabs, d_model)
        self.pos_embed = PositionalEncoding(d_model, dropout)
        self.encoder = Encoder(n_layers, EncoderLayer(d_model, c(attn), c(feed_forward), dropout))
        self.encoder_big = Encoder(n_layers, EncoderLayer(d_model, c(attn_big), c(feed_forward), dropout))
        self.encoder_no_attention = Encoder(n_layers,
                                            EncoderLayerNoAttention(d_model, c(attn), c(feed_forward), dropout))
        self.r2l_decoder = R2L_Decoder(n_layers, DecoderLayer(d_model, c(attn), c(feed_forward),
                                                              sublayer_num=3, dropout=dropout))
        self.l2r_decoder = L2R_Decoder(n_layers, DecoderLayer(d_model, c(attn), c(feed_forward),
                                                              sublayer_num=4, dropout=dropout))
        self.generator = Generator(d_model, vocab.n_vocabs)

    def encode(self, src, src_mask, feature_mode_two=False):
        # The R2L branch reuses the two-feature (big-head) encoder even in the
        # other multi-feature modes; the original duplicated this block for
        # feature_mode == 'two', which is folded in here.
        if feature_mode_two or self.feature_mode == 'two':
            x1 = self.image_src_embed(src[0])
            x1 = self.pos_embed(x1)
            x1 = self.encoder_big(x1, src_mask[0])
            x2 = self.motion_src_embed(src[1])
            x2 = self.pos_embed(x2)
            x2 = self.encoder_big(x2, src_mask[1])
            return x1 + x2
        if self.feature_mode == 'one':
            x = self.src_embed(src)
            x = self.pos_embed(x)
            return self.encoder(x, src_mask)
        elif self.feature_mode == 'three':
            x1 = self.image_src_embed(src[0])
            x1 = self.pos_embed(x1)
            x1 = self.encoder(x1, src_mask[0])
            x2 = self.motion_src_embed(src[1])
            x2 = self.pos_embed(x2)
            x2 = self.encoder(x2, src_mask[1])
            x3 = self.object_src_embed(src[2])
            x3 = self.pos_embed(x3)
            x3 = self.encoder(x3, src_mask[2])
            return x1 + x2 + x3
        elif self.feature_mode == 'four':
            x1 = self.image_src_embed(src[0])
            x1 = self.pos_embed(x1)
            x1 = self.encoder(x1, src_mask[0])
            x2 = self.motion_src_embed(src[1])
            x2 = self.pos_embed(x2)
            x2 = self.encoder(x2, src_mask[1])
            x3 = self.object_src_embed(src[2])
            # No positional encoding for the object features here.
            x3 = self.encoder(x3, src_mask[2])
            x4 = self.rel_src_embed(src[3])
            # Relation features go through the attention-free encoder,
            # without positional encoding.
            x4 = self.encoder_no_attention(x4, src_mask[3])
            return x1 + x2 + x3 + x4

    def r2l_decode(self, r2l_trg, memory, src_mask, r2l_trg_mask):
        x = self.trg_embed(r2l_trg)
        x = self.pos_embed(x)
        return self.r2l_decoder(x, memory, src_mask, r2l_trg_mask)

    def l2r_decode(self, trg, memory, src_mask, trg_mask, r2l_memory, r2l_trg_mask):
        x = self.trg_embed(trg)
        x = self.pos_embed(x)
        return self.l2r_decoder(x, memory, src_mask, trg_mask, r2l_memory, r2l_trg_mask)

    def forward(self, src, r2l_trg, trg, mask):
        src_mask, r2l_pad_mask, r2l_trg_mask, trg_mask = mask
        if self.feature_mode == 'one':
            encoding_outputs = self.encode(src, src_mask)
            r2l_outputs = self.r2l_decode(r2l_trg, encoding_outputs, src_mask, r2l_trg_mask)
            l2r_outputs = self.l2r_decode(trg, encoding_outputs, src_mask, trg_mask, r2l_outputs, r2l_pad_mask)
        elif self.feature_mode in ('two', 'three', 'four'):
            # The original wrote `== 'two' or 'three' or 'four'`, which is always
            # truthy; `in` expresses the intended membership test.
            enc_src_mask, dec_src_mask = src_mask
            r2l_encoding_outputs = self.encode(src, enc_src_mask, feature_mode_two=True)
            encoding_outputs = self.encode(src, enc_src_mask)
            r2l_outputs = self.r2l_decode(r2l_trg, r2l_encoding_outputs, dec_src_mask[0], r2l_trg_mask)
            l2r_outputs = self.l2r_decode(trg, encoding_outputs, dec_src_mask[1], trg_mask, r2l_outputs, r2l_pad_mask)
        else:
            # The original raised a bare string, which is a TypeError in Python 3.
            raise ValueError('unknown feature_mode: {}'.format(self.feature_mode))
        r2l_pred = self.generator(r2l_outputs)
        l2r_pred = self.generator(l2r_outputs)
        return r2l_pred, l2r_pred

    def greedy_decode(self, batch_size, src_mask, memory, max_len):
        eos_idx = self.vocab.word2idx['<S>']
        r2l_hidden = None
        with torch.no_grad():
            output = torch.ones(batch_size, 1).fill_(eos_idx).long().cuda()
            for i in range(max_len + 2 - 1):
                trg_mask = subsequent_mask(output.size(1))
                dec_out = self.r2l_decode(output, memory, src_mask, trg_mask)  # [batch, len, d_model]
                r2l_hidden = dec_out
                pred = self.generator(dec_out)  # [batch, len, n_vocabs]
                next_word = pred[:, -1].max(dim=-1)[1].unsqueeze(1)  # pred[:, -1] is [batch, n_vocabs]
                output = torch.cat([output, next_word], dim=-1)
        return r2l_hidden, output

    # Required by beam search.
    def r2l_beam_search_decode(self, batch_size, src, src_mask, model_encodings, beam_size, max_len):
        end_symbol = self.vocab.word2idx['<S>']
        start_symbol = self.vocab.word2idx['<S>']
        r2l_outputs = None

        # 1.1 Setup Src.
        # src has shape (batch_size, sent_len); src_mask has shape (batch_size, 1, sent_len);
        # model_encodings has shape (batch_size, sentence_len, d_model).

        # 1.2 Setup Tgt Hypothesis Tracking.
        # hypotheses[i] has shape (cur_beam_sz, dec_sent_len); init: (1, 1) per example.
        hypotheses = [copy.deepcopy(torch.full((1, 1), start_symbol, dtype=torch.long,
                                               device=self.device)) for _ in range(batch_size)]
        # One list of finished Hypothesis objects per example; init: empty lists.
        completed_hypotheses = [copy.deepcopy([]) for _ in range(batch_size)]
        # hyp_scores[i] has shape (cur_beam_sz,); scores are log-probs, so init at 0.
        hyp_scores = [copy.deepcopy(torch.full((1,), 0, dtype=torch.float, device=self.device))
                      for _ in range(batch_size)]

        # 2. Iterate: generate one token at a time until max_len.
        for iter in range(max_len + 1):
            if all([len(completed_hypotheses[i]) == beam_size for i in range(batch_size)]):
                break

            # 2.1 Set up the batch. With beam search, each example has a variable
            # number (cur_beam_size, between 0 and beam_size) of live hypotheses.
            # We decode all hypotheses for all examples together, so we replicate
            # the src encodings and masks the appropriate number of times, run
            # everything through the decoder and log-softmax at once, and use
            # `torch.split` at the end to recover per-example results.
            cur_beam_sizes, last_tokens, model_encodings_l, src_mask_l = [], [], [], []
            for i in range(batch_size):
                if hypotheses[i] is None:
                    cur_beam_sizes += [0]
                    continue
                cur_beam_size, decoded_len = hypotheses[i].shape
                cur_beam_sizes += [cur_beam_size]
                last_tokens += [hypotheses[i]]
                model_encodings_l += [model_encodings[i:i + 1]] * cur_beam_size
                src_mask_l += [src_mask[i:i + 1]] * cur_beam_size
            # shape (sum(cur_beam_sz_i), dec_sent_len, d_model)
            model_encodings_cur = torch.cat(model_encodings_l, dim=0)
            src_mask_cur = torch.cat(src_mask_l, dim=0)
            y_tm1 = torch.cat(last_tokens, dim=0)
            if self.feature_mode == 'one':
                out = self.r2l_decode(Variable(y_tm1).to(self.device), model_encodings_cur, src_mask_cur,
                                      Variable(subsequent_mask(y_tm1.size(-1)).type_as(src.data)).to(self.device))
            elif self.feature_mode in ('two', 'three', 'four'):
                out = self.r2l_decode(Variable(y_tm1).to(self.device), model_encodings_cur, src_mask_cur,
                                      Variable(subsequent_mask(y_tm1.size(-1)).type_as(src[0].data)).to(self.device))
            r2l_outputs = out

            # log_prob has shape (sum(cur_beam_sz_i), 1, vocab_sz).
            log_prob = self.generator(out[:, -1, :]).unsqueeze(1)
            _, decoded_len, vocab_sz = log_prob.shape
            # After the split, log_prob[i] is (cur_beam_sz_i, 1, vocab_sz).
            log_prob = torch.split(log_prob, cur_beam_sizes, dim=0)

            # 2.2 Process each example. An example may have finished before the
            # others (no more live hypotheses), in which case we skip it.
            new_hypotheses, new_hyp_scores = [], []
            for i in range(batch_size):
                if hypotheses[i] is None or len(completed_hypotheses[i]) >= beam_size:
                    new_hypotheses += [None]
                    new_hyp_scores += [None]
                    continue

                # 2.2.1 Cumulative scores for each live hypothesis. hyp_scores holds
                # the scores from the previous step and log_prob the new ones; since
                # they are log-probs, we sum instead of multiplying. .view(-1)
                # flattens to one dimension of size cur_beam_sz * vocab_sz, so after
                # topk we can recover the generating sentence and the next word via
                # ix // vocab_sz and ix % vocab_sz.
                cur_beam_sz_i, dec_sent_len, vocab_sz = log_prob[i].shape
                cumulative_hyp_scores_i = (hyp_scores[i].unsqueeze(-1).unsqueeze(-1)
                                           .expand((cur_beam_sz_i, 1, vocab_sz)) + log_prob[i]).view(-1)

                # 2.2.2 topk over the flattened scores.
                live_hyp_num_i = beam_size - len(completed_hypotheses[i])
                top_cand_hyp_scores, top_cand_hyp_pos = torch.topk(cumulative_hyp_scores_i, k=live_hyp_num_i)
                # prev_hyp_ids in [0, cur_beam_sz); hyp_word_ids in [0, vocab_sz).
                prev_hyp_ids, hyp_word_ids = top_cand_hyp_pos // self.vocab.n_vocabs, \
                                             top_cand_hyp_pos % self.vocab.n_vocabs

                # 2.2.3 Append each topk word to its generating sentence; record the
                # new hypothesis and its total score (or mark it completed).
                new_hypotheses_i, new_hyp_scores_i = [], []
                for prev_hyp_id, hyp_word_id, cand_new_hyp_score in zip(prev_hyp_ids, hyp_word_ids,
                                                                        top_cand_hyp_scores):
                    prev_hyp_id, hyp_word_id, cand_new_hyp_score = \
                        prev_hyp_id.item(), hyp_word_id.item(), cand_new_hyp_score.item()
                    new_hyp_sent = torch.cat(
                        (hypotheses[i][prev_hyp_id], torch.tensor([hyp_word_id], device=self.device)))
                    if hyp_word_id == end_symbol:
                        completed_hypotheses[i].append(Hypothesis(
                            value=[self.vocab.idx2word[a.item()] for a in new_hyp_sent[1:-1]],
                            score=cand_new_hyp_score))
                    else:
                        new_hypotheses_i.append(new_hyp_sent.unsqueeze(-1))
                        new_hyp_scores_i.append(cand_new_hyp_score)

                # 2.2.4 If new_hypotheses_i is empty, the example is fully processed;
                # None serves as a sentinel (handled gracefully by the loops above).
                if len(new_hypotheses_i) > 0:
                    hypotheses_i = torch.cat(new_hypotheses_i, dim=-1).transpose(0, -1).to(self.device)
                    hyp_scores_i = torch.tensor(new_hyp_scores_i, dtype=torch.float, device=self.device)
                else:
                    hypotheses_i, hyp_scores_i = None, None
                new_hypotheses += [hypotheses_i]
                new_hyp_scores += [hyp_scores_i]
            hypotheses, hyp_scores = new_hypotheses, new_hyp_scores

        # 2.3 Postprocess. If we reach max_len without generating the end token,
        # partial sentences are not added to completed_hypotheses automatically,
        # so we add as many as needed to reach `beam_size` per example, then sort
        # each example's completed hypotheses by score.
        for i in range(batch_size):
            hyps_to_add = beam_size - len(completed_hypotheses[i])
            if hyps_to_add > 0:
                scores, ix = torch.topk(hyp_scores[i], k=hyps_to_add)
                for score, id in zip(scores, ix):
                    completed_hypotheses[i].append(Hypothesis(
                        value=[self.vocab.idx2word[a.item()] for a in hypotheses[i][id][1:]],
                        score=score))
            completed_hypotheses[i].sort(key=lambda hyp: hyp.score, reverse=True)
        return r2l_outputs, completed_hypotheses

    def beam_search_decode(self, src, beam_size, max_len):
        """
        An implementation of beam search for the Transformer model. Beam search
        is performed in a batched manner: each example in a batch generates
        `beam_size` hypotheses. We return a list (len: batch_size) of lists
        (len: beam_size) of Hypothesis objects, which contain the decoded
        sentences and their scores.
        :param src: shape (sent_len, batch_size). Each val is 0 < val < len(vocab_dec). The input tokens to the decoder.
        :param max_len: the maximum length to decode
        :param beam_size: the beam size to use
        :return completed_hypotheses: a list of length batch_size, each containing a list of beam_size Hypothesis
            objects. Hypothesis is a named tuple: its first entry, "value", is a list of strings (one string per word
            token) for the translated sentence; its second entry, "score", is the log-prob score of the sentence.
        """
        # 1. Setup
        start_symbol = self.vocab.word2idx['<S>']
        end_symbol = self.vocab.word2idx['<S>']

        # 1.1 Setup Src.
        # src has shape (batch_size, sent_len); src_mask has shape (batch_size, 1, sent_len).
        src_mask = pad_mask(src, r2l_trg=None, trg=None, pad_idx=self.vocab.word2idx['<PAD>'])
        # model_encodings has shape (batch_size, sentence_len, d_model).
        if self.feature_mode == 'one':
            batch_size = src.shape[0]
            model_encodings = self.encode(src, src_mask)
            r2l_memory, r2l_completed_hypotheses = self.r2l_beam_search_decode(
                batch_size, src, src_mask, model_encodings=model_encodings,
                beam_size=beam_size, max_len=max_len)
        elif self.feature_mode in ('two', 'three', 'four'):
            batch_size = src[0].shape[0]
            enc_src_mask = src_mask[0]
            dec_src_mask = src_mask[1]
            r2l_model_encodings = self.encode(src, enc_src_mask, feature_mode_two=True)
            model_encodings = self.encode(src, enc_src_mask)
            # 1.2 Setup the r2l target output.
            r2l_memory, r2l_completed_hypotheses = self.r2l_beam_search_decode(
                batch_size, src, dec_src_mask[0], model_encodings=r2l_model_encodings,
                beam_size=beam_size, max_len=max_len)

        # 1.3 Setup Tgt Hypothesis Tracking (same structure as in r2l_beam_search_decode).
        hypotheses = [copy.deepcopy(torch.full((1, 1), start_symbol, dtype=torch.long,
                                               device=self.device)) for _ in range(batch_size)]
        completed_hypotheses = [copy.deepcopy([]) for _ in range(batch_size)]
        hyp_scores = [copy.deepcopy(torch.full((1,), 0, dtype=torch.float, device=self.device))
                      for _ in range(batch_size)]  # scores are log-probs, so init at 0.

        # 2. Iterate: generate one token at a time until max_len.
        for iter in range(max_len + 1):
            if all([len(completed_hypotheses[i]) == beam_size for i in range(batch_size)]):
                break

            # 2.1 Set up the batch, replicating encodings, masks, and the R2L memory
            # per live hypothesis, exactly as in r2l_beam_search_decode.
            cur_beam_sizes, last_tokens, model_encodings_l, src_mask_l, r2l_memory_l = [], [], [], [], []
            for i in range(batch_size):
                if hypotheses[i] is None:
                    cur_beam_sizes += [0]
                    continue
                cur_beam_size, decoded_len = hypotheses[i].shape
                cur_beam_sizes += [cur_beam_size]
                last_tokens += [hypotheses[i]]
                model_encodings_l += [model_encodings[i:i + 1]] * cur_beam_size
                if self.feature_mode == 'one':
                    src_mask_l += [src_mask[i:i + 1]] * cur_beam_size
                elif self.feature_mode in ('two', 'three', 'four'):
                    src_mask_l += [dec_src_mask[1][i:i + 1]] * cur_beam_size
                r2l_memory_l += [r2l_memory[i: i + 1]] * cur_beam_size
            model_encodings_cur = torch.cat(model_encodings_l, dim=0)
            src_mask_cur = torch.cat(src_mask_l, dim=0)
            y_tm1 = torch.cat(last_tokens, dim=0)
            r2l_memory_cur = torch.cat(r2l_memory_l, dim=0)
            if self.feature_mode == 'one':
                out = self.l2r_decode(Variable(y_tm1).to(self.device), model_encodings_cur, src_mask_cur,
                                      Variable(subsequent_mask(y_tm1.size(-1)).type_as(src.data)).to(self.device),
                                      r2l_memory_cur, r2l_trg_mask=None)
            elif self.feature_mode in ('two', 'three', 'four'):
                out = self.l2r_decode(Variable(y_tm1).to(self.device), model_encodings_cur, src_mask_cur,
                                      Variable(subsequent_mask(y_tm1.size(-1)).type_as(src[0].data)).to(self.device),
                                      r2l_memory_cur, r2l_trg_mask=None)

            # log_prob has shape (sum(cur_beam_sz_i), 1, vocab_sz).
            log_prob = self.generator(out[:, -1, :]).unsqueeze(1)
            _, decoded_len, vocab_sz = log_prob.shape
            # After the split, log_prob[i] is (cur_beam_sz_i, 1, vocab_sz).
            log_prob = torch.split(log_prob, cur_beam_sizes, dim=0)

            # 2.2 Process each example; skip examples that have already finished.
            new_hypotheses, new_hyp_scores = [], []
            for i in range(batch_size):
                if hypotheses[i] is None or len(completed_hypotheses[i]) >= beam_size:
                    new_hypotheses += [None]
                    new_hyp_scores += [None]
                    continue

                # 2.2.1 Cumulative scores, as in r2l_beam_search_decode: sum old
                # log-prob scores with the new ones and flatten so that after topk,
                # ix // vocab_sz recovers the generating sentence and ix % vocab_sz
                # the next word.
                cur_beam_sz_i, dec_sent_len, vocab_sz = log_prob[i].shape
                cumulative_hyp_scores_i = (hyp_scores[i].unsqueeze(-1).unsqueeze(-1)
                                           .expand((cur_beam_sz_i, 1, vocab_sz)) + log_prob[i]).view(-1)

                # 2.2.2 topk over the flattened scores.
                live_hyp_num_i = beam_size - len(completed_hypotheses[i])
                top_cand_hyp_scores, top_cand_hyp_pos = torch.topk(cumulative_hyp_scores_i, k=live_hyp_num_i)
                prev_hyp_ids, hyp_word_ids = top_cand_hyp_pos // self.vocab.n_vocabs, \
                                             top_cand_hyp_pos % self.vocab.n_vocabs

                # 2.2.3 Append each topk word to its generating sentence; record the
                # new hypothesis and its total score (or mark it completed).
                new_hypotheses_i, new_hyp_scores_i = [], []
                for prev_hyp_id, hyp_word_id, cand_new_hyp_score in zip(prev_hyp_ids, hyp_word_ids,
                                                                        top_cand_hyp_scores):
                    prev_hyp_id, hyp_word_id, cand_new_hyp_score = \
                        prev_hyp_id.item(), hyp_word_id.item(), cand_new_hyp_score.item()
                    new_hyp_sent = torch.cat(
                        (hypotheses[i][prev_hyp_id], torch.tensor([hyp_word_id], device=self.device)))
                    if hyp_word_id == end_symbol:
                        completed_hypotheses[i].append(Hypothesis(
                            value=[self.vocab.idx2word[a.item()] for a in new_hyp_sent[1:-1]],
                            score=cand_new_hyp_score))
                    else:
                        new_hypotheses_i.append(new_hyp_sent.unsqueeze(-1))
                        new_hyp_scores_i.append(cand_new_hyp_score)

                # 2.2.4 None is the sentinel for a fully processed example.
                if len(new_hypotheses_i) > 0:
                    hypotheses_i = torch.cat(new_hypotheses_i, dim=-1).transpose(0, -1).to(self.device)
                    hyp_scores_i = torch.tensor(new_hyp_scores_i, dtype=torch.float, device=self.device)
                else:
                    hypotheses_i, hyp_scores_i = None, None
                new_hypotheses += [hypotheses_i]
                new_hyp_scores += [hyp_scores_i]
            hypotheses, hyp_scores = new_hypotheses, new_hyp_scores

        # 2.3 Postprocess: top up each example to `beam_size` completed hypotheses
        # with its best partial sentences, then sort by score.
        for i in range(batch_size):
            hyps_to_add = beam_size - len(completed_hypotheses[i])
            if hyps_to_add > 0:
                scores, ix = torch.topk(hyp_scores[i], k=hyps_to_add)
                for score, id in zip(scores, ix):
                    completed_hypotheses[i].append(Hypothesis(
                        value=[self.vocab.idx2word[a.item()] for a in hypotheses[i][id][1:]],
                        score=score))
            completed_hypotheses[i].sort(key=lambda hyp: hyp.score, reverse=True)
        return r2l_completed_hypotheses, completed_hypotheses
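To show how the pieces fit together, here is a hypothetical instantiation sketch. The DummyVocab class, feature dimensions, and the all-ones target tokens are placeholders invented for illustration, not the real data pipeline; a GPU is required because the code above hard-codes .cuda().

# Hypothetical smoke test -- vocab and dimensions are made up for illustration.
class DummyVocab:
    def __init__(self):
        self.word2idx = {'<PAD>': 0, '<S>': 1}
        self.idx2word = {0: '<PAD>', 1: '<S>'}
        self.n_vocabs = 1000

model = ABDTransformer(vocab=DummyVocab(), d_feat=1024, d_model=512, d_ff=2048,
                       n_heads=8, n_layers=4, dropout=0.1, feature_mode='one',
                       device='cuda').cuda()
src = torch.randn(2, 30, 1024).cuda()                 # [batch, frames, d_feat]
r2l_trg = torch.ones(2, 5).long().cuda()              # placeholder token ids
trg = torch.ones(2, 5).long().cuda()
mask = pad_mask(src, r2l_trg=r2l_trg, trg=trg, pad_idx=0)
r2l_pred, l2r_pred = model(src, r2l_trg, trg, mask)
print(r2l_pred.shape, l2r_pred.shape)                 # [2, 5, 1000] each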
