  • Transformer code notes----attention.py

    import numpy as np
    import torch
    import torch.nn as nn
    
    
    class MultiHeadAttention(nn.Module):
        ''' Multi-Head Attention module '''
    
        def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
            super().__init__()
    
            self.n_head = n_head
            self.d_k = d_k
            self.d_v = d_v
    
            self.w_qs = nn.Linear(d_model, n_head * d_k)  # linear projection: d_model in, n_head * d_k out (512 -> 512 in the base Transformer)
            self.w_ks = nn.Linear(d_model, n_head * d_k)
            self.w_vs = nn.Linear(d_model, n_head * d_v)
            nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))  # weight init: normal distribution with mean 0 and the given std
            nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))  # np.sqrt(): square root
            nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
    
            self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5),
                                                       attn_dropout=dropout)  # np.power(d_k, 0.5) = sqrt(d_k), used as the softmax temperature
            self.layer_norm = nn.LayerNorm(d_model)  # layer normalization
    
            self.fc = nn.Linear(n_head * d_v, d_model)  # final projection: n_head * d_v in, d_model out (512 -> 512 in the base Transformer)
            nn.init.xavier_normal_(self.fc.weight)  # Xavier initialization for the projection weights
    
            self.dropout = nn.Dropout(dropout)  # dropout applied to the projected output
    
        def forward(self, q, k, v, mask=None):
            d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
    
            sz_b, len_q, _ = q.size()  # batch size and sequence length of the queries
            sz_b, len_k, _ = k.size()  # ... of the keys
            sz_b, len_v, _ = v.size()  # ... of the values
    
            residual = q  # keep the input for the residual connection
    
            q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)  # project, then reshape to (batch, len, n_head, d_k)
            k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
            v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
    
            # .permute() reorders the tensor's dimensions; .contiguous() copies the data into a
            # contiguous memory layout so that .view() can reshape it afterwards.
            q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k)  # (n*b) x lq x dk
            k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k)  # (n*b) x lk x dk
            v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v)  # (n*b) x lv x dv
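            # Shape walk-through (illustrative assumption, not from the original post:
            # sz_b=2, len_q=len_k=len_v=10, n_head=8, d_k=d_v=64):
            #   after the linear + view:       q is (2, 10, 8, 64)
            #   after permute(2, 0, 1, 3):     q is (8, 2, 10, 64)
            #   after view(-1, len_q, d_k):    q is (16, 10, 64), heads folded into the batch dim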
    
            if mask is not None:
                mask = mask.repeat(n_head, 1, 1)  # tile the mask once per head: (n*b) x len_q x len_k
    
            output, attn = self.attention(q, k, v, mask=mask)  # scaled dot-product attention returns the weighted values and the attention weights
    
            output = output.view(n_head, sz_b, len_q, d_v)  # un-fold the heads: (n, b, len_q, d_v)
            output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)  # b x lq x (n*dv)
    
            output = self.dropout(self.fc(output))  # final linear projection followed by dropout
            output = self.layer_norm(output + residual)  # residual connection and layer normalization
    
            return output, attn
    
    
    class ScaledDotProductAttention(nn.Module):
        ''' Scaled Dot-Product Attention '''
    
        def __init__(self, temperature, attn_dropout=0.1):
            super().__init__()
            self.temperature = temperature
            self.dropout = nn.Dropout(attn_dropout)
            self.softmax = nn.Softmax(dim=2)
    
        def forward(self, q, k, v, mask=None):
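            # Computes Attention(Q, K, V) = softmax(Q K^T / temperature) V, with
            # temperature = sqrt(d_k) as set up in MultiHeadAttention above.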
            attn = torch.bmm(q, k.transpose(1, 2))  # batched matrix multiply Q x K^T: (n*b) x len_q x len_k
            attn = attn / self.temperature  # scale by sqrt(d_k) so the dot products do not grow with d_k and saturate the softmax
    
            if mask is not None:
                attn = attn.masked_fill(mask.bool(), -np.inf)  # masked positions get -inf, i.e. zero weight after the softmax
    
            attn = self.softmax(attn)
            attn = self.dropout(attn)
            output = torch.bmm(attn, v)
    
            return output, attn
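
  • A minimal usage sketch (my addition, not from the original post). The dimensions d_model=512, n_head=8, d_k=d_v=64, batch size 2, and sequence length 10 are illustrative assumptions matching the base Transformer configuration the comments refer to:

    import torch

    # assumes MultiHeadAttention from the attention.py listing above is in scope
    mha = MultiHeadAttention(n_head=8, d_model=512, d_k=64, d_v=64, dropout=0.1)

    x = torch.randn(2, 10, 512)                      # (batch, seq_len, d_model)
    mask = torch.zeros(2, 10, 10, dtype=torch.bool)  # True would mark positions to block
    out, attn = mha(x, x, x, mask=mask)              # self-attention: q = k = v = x

    print(out.shape)   # torch.Size([2, 10, 512])
    print(attn.shape)  # torch.Size([16, 10, 10]), i.e. (n_head * batch, len_q, len_k)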
  • Original post: https://www.cnblogs.com/Uriel-w/p/15426149.html