  • PyTorch word embeddings

    Import packages:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.utils.data as tud

    from torch.nn.parameter import Parameter  # parameter updates and optimization

    from collections import Counter           # Counter: a counting dictionary
    import numpy as np
    import random
    import math

    import pandas as pd
    import scipy                              # SciPy builds on NumPy and provides many mathematical algorithms and functions
    import scipy.stats                        # spearmanr, used in the evaluation below
    import scipy.spatial                      # cosine distance, used for nearest-neighbor search below
    import sklearn
    from sklearn.metrics.pairwise import cosine_similarity  # cosine similarity function

    Parameter settings:

    USE_CUDA = torch.cuda.is_available()  # whether a GPU is available

    # To make the results reproducible, fix all the random seeds to one value
    random.seed(53113)
    np.random.seed(53113)
    torch.manual_seed(53113)
    if USE_CUDA:
        torch.cuda.manual_seed(53113)

    # Hyperparameters
    K = 100                   # number of negative samples per positive word
    C = 3                     # context window size: predict the C words on each side of the center word
    NUM_EPOCHS = 2            # number of training epochs
    MAX_VOCAB_SIZE = 30000    # vocabulary size
    BATCH_SIZE = 128          # batch size
    LEARNING_RATE = 0.2       # initial learning rate
    EMBEDDING_SIZE = 100      # embedding dimension

    LOG_FILE = "word-embedding.log"

    # tokenize function: split a text into individual words
    def word_tokenize(text):
        return text.split()

    1. Build the vocabulary

    • Read all the text from a text file and build a vocabulary from it;
    • Since the number of distinct words may be very large, keep only the MAX_VOCAB_SIZE most frequent ones;
    • Add a special <unk> token that stands for all the infrequent words;
    • Record the word-to-index mapping, the index-to-word mapping, the word counts, the (normalized) word frequencies, and the total number of words.

    with open("sample_data/text8.train.txt", "r") as fin:   # read the training file
        text = fin.read()

    text = [w for w in word_tokenize(text.lower())]              # tokenize the lowercased text (here effectively text.lower().split())

    vocab = dict(Counter(text).most_common(MAX_VOCAB_SIZE-1))    # dictionary of the (MAX_VOCAB_SIZE-1) most frequent words; the -1 leaves one slot for the unknown-word token
    vocab["<unk>"] = len(text) - np.sum(list(vocab.values()))    # number of rare-word tokens = total tokens - counts of the kept words; since <unk> is added last, its index below is 29999

    idx_to_word = [word for word in vocab.keys()]                # list of all the words (keys) in the dictionary
    word_to_idx = {word:i for i, word in enumerate(idx_to_word)} # word-to-index mapping; indices follow descending frequency, so the most common word gets index 0

    word_counts = np.array([count for count in vocab.values()], dtype=np.float32)  # counts of all words
    word_freqs = word_counts / np.sum(word_counts)                                 # frequencies of all words

    word_freqs = word_freqs ** (3./4.)                                             # raise the frequencies to the 3/4 power, as in the paper
    word_freqs = word_freqs / np.sum(word_freqs)                                   # renormalize: probability of each word being drawn as a negative sample

    VOCAB_SIZE = len(idx_to_word)                                # vocabulary size: 30000 = MAX_VOCAB_SIZE
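
    The sampling distribution built above is the unigram distribution raised to the 3/4 power and renormalized, as in the word2vec paper:

    $$P_n(w) = \frac{\mathrm{count}(w)^{3/4}}{\sum_{w'} \mathrm{count}(w')^{3/4}}$$

    Raising the counts to the 3/4 power flattens the distribution slightly, so very frequent words are drawn somewhat less often, and rare words somewhat more often, than their raw frequencies would suggest.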

    2. Implement the Dataloader

    A dataloader needs to do the following:

    • Encode all the text as numbers and preprocess it with subsampling.
    • Keep the vocabulary, the word counts, and the normalized word frequencies.
    • Sample one center word per iteration.
    • Return the context words of the current center word.
    • Sample some negative words for the center word.
    • Return the word counts.

    There is a good tutorial on how to use the PyTorch dataloader. To use a dataloader, we need to define the following two functions:

    • __len__ returns how many items there are in the whole dataset
    • __getitem__ returns one item for a given index

    With a dataloader we can easily shuffle the whole dataset, fetch one batch of data at a time, and so on.

    class WordEmbeddingDataset(tud.Dataset):           # subclass of tud.Dataset
        def __init__(self, text, word_to_idx, idx_to_word, word_freqs, word_counts):
            ''' text: a list of words, all text from the training dataset
                word_to_idx: the dictionary from word to idx
                idx_to_word: idx to word mapping
                word_freqs: the frequency of each word
                word_counts: the word counts
            '''
            super(WordEmbeddingDataset, self).__init__()                             # initialize the parent tud.Dataset class
            # encode every word in text as its index in word_to_idx; words not in the dictionary
            # fall back to the index of "<unk>" (VOCAB_SIZE-1; writing word_to_idx["<unk>"] as the default would be clearer)
            self.text_encoded = [word_to_idx.get(t, VOCAB_SIZE-1) for t in text]
            self.text_encoded = torch.LongTensor(self.text_encoded)                  # convert to LongTensor

            self.word_to_idx = word_to_idx             # keep the remaining data around
            self.idx_to_word = idx_to_word
            self.word_freqs = torch.Tensor(word_freqs)
            self.word_counts = torch.Tensor(word_counts)

        def __len__(self):
            ''' return the length of the whole dataset (number of word tokens)
            '''
            return len(self.text_encoded)

        def __getitem__(self, idx):                    # idx ranges over all word positions in the text
            ''' this function returns the following data for training
                - the center word
                - the (positive) words near the center word
                - K randomly sampled words per positive word as negative samples
            '''
            center_word = self.text_encoded[idx]        # index of the center word

            # indices of the surrounding words, e.g. for idx=0, pos_indices = [-3, -2, -1, 1, 2, 3]
            pos_indices = list(range(idx-C, idx)) + list(range(idx+1, idx+C+1))
            pos_indices = [i % len(self.text_encoded) for i in pos_indices]        # indices may fall outside the text, so wrap around with modulo
            pos_words = self.text_encoded[pos_indices]                             # indices of the context (positive) words

            # indices of the negative samples: torch.multinomial draws K * pos_words.shape[0] samples
            # from self.word_freqs and returns their indices; sampling is with replacement, and
            # larger values in self.word_freqs are more likely to be drawn
            neg_words = torch.multinomial(self.word_freqs, K * pos_words.shape[0], True)

            return center_word, pos_words, neg_words
    Create the dataset and dataloader

    dataset = WordEmbeddingDataset(text, word_to_idx, idx_to_word, word_freqs, word_counts)
    dataloader = tud.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)

    Check what the dataloader returns (the tensors are too large to print in full, so just show their shapes). With C=3 and K=100, the expected shapes are [BATCH_SIZE], [BATCH_SIZE, 2*C], and [BATCH_SIZE, 2*C*K]:

    for i, (input_labels, pos_labels, neg_labels) in enumerate(dataloader):
        print(input_labels.shape, pos_labels.shape, neg_labels.shape)
        break

    torch.Size([128]) torch.Size([128, 6]) torch.Size([128, 600])

    3. PyTorch model

    class EmbeddingModel(nn.Module):
        def __init__(self, vocab_size, embed_size):
            ''' initialize the input and output embedding matrices
            '''
            super(EmbeddingModel, self).__init__()
            self.vocab_size = vocab_size       # 30000
            self.embed_size = embed_size       # 100

            initrange = 0.5 / self.embed_size
            self.out_embed = nn.Embedding(self.vocab_size, self.embed_size, sparse=False)  # output embeddings nn.Embedding(30000, 100)
            self.out_embed.weight.data.uniform_(-initrange, initrange)                     # one way to initialize the weights

            self.in_embed = nn.Embedding(self.vocab_size, self.embed_size, sparse=False)   # input embeddings nn.Embedding(30000, 100)
            self.in_embed.weight.data.uniform_(-initrange, initrange)

        def forward(self, input_labels, pos_labels, neg_labels):    # the three tensors returned by tud.DataLoader()
            '''
            input_labels: [batch_size], the center words
            pos_labels: [batch_size, (C * 2)], words that appear in the context window around the center word (C = window size)
            neg_labels: [batch_size, (C * 2 * K)], words that do not appear around the center word, obtained by negative sampling

            return: loss, [batch_size]
            '''
            input_embedding = self.in_embed(input_labels)     # [batch_size, embed_size]; equivalent to a (128, 30000) one-hot times (30000, 100) lookup = 128 x 100
            pos_embedding = self.out_embed(pos_labels)        # [batch_size, 2*C, embed_size]: each batch item has 2*C context words, each with embed_size dimensions
            neg_embedding = self.out_embed(neg_labels)        # [batch_size, 2*C*K, embed_size]

            # torch.bmm() is a batched matrix multiplication: (b, n, m) * (b, m, p) = (b, n, p)
            pos_dot = torch.bmm(pos_embedding, input_embedding.unsqueeze(2)).squeeze()      # input_embedding.unsqueeze(2) is [batch_size, embed_size, 1]; bmm gives [batch_size, 2*C, 1], then squeeze the last dimension
            neg_dot = torch.bmm(neg_embedding, -input_embedding.unsqueeze(2)).squeeze()     # [batch_size, 2*C*K]

            # the loss below is the formula from the paper
            log_pos = F.logsigmoid(pos_dot).sum(1)    # [batch_size]
            log_neg = F.logsigmoid(neg_dot).sum(1)
            loss = log_pos + log_neg

            return -loss

        def input_embeddings(self):   # return the self.in_embed weights as a numpy array
            return self.in_embed.weight.data.cpu().numpy()

    For details, see the negSamplingLossAndGradient function at https://www.cnblogs.com/cxq1126/p/13418839.html.
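
    Written out, the per-center-word quantity that forward returns is the skip-gram negative-sampling loss from the word2vec paper (a sketch of the formula, using u for output embeddings, v for input embeddings, and w_k for the K negative samples drawn from P_n(w) for each context word o):

    $$L(c) = -\sum_{o \in \text{context}(c)} \Big[ \log \sigma(u_o^{\top} v_c) + \sum_{k=1}^{K} \log \sigma(-u_{w_k}^{\top} v_c) \Big]$$

    In the code, log_pos sums log σ(u_o·v_c) over the 2*C context words, log_neg sums log σ(-u_w·v_c) over the 2*C*K negative samples (the minus sign comes from negating input_embedding in neg_dot), and forward returns the negated sum.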

    Define a model and move it to the GPU

    model = EmbeddingModel(VOCAB_SIZE, EMBEDDING_SIZE)   # instantiate the model: it has parameters and a loss, so it can be optimized

    if USE_CUDA:
        model = model.cuda()

    4. Evaluate the model

    The evaluation files have roughly the following structure (word1  word2  similarity score):
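    For example, a few rows might look like this (tab-separated for the .txt files, comma-separated for the .csv file; the values below are illustrative, not taken from the actual datasets):

    word1       word2       similarity
    tiger       cat         7.35
    book        paper       5.70
    computer    keyboard    6.22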

    def evaluate(filename, embedding_weights):
        if filename.endswith(".csv"):
            data = pd.read_csv(filename, sep=",")
        else:
            data = pd.read_csv(filename, sep="\t")
        human_similarity = []
        model_similarity = []
        for i in data.iloc[:, 0:2].index:                           # data.iloc[:, 0:2] takes columns 0 and 1 for every row
            word1, word2 = data.iloc[i, 0], data.iloc[i, 1]
            if word1 not in word_to_idx or word2 not in word_to_idx:
                continue
            else:
                word1_idx, word2_idx = word_to_idx[word1], word_to_idx[word2]
                word1_embed, word2_embed = embedding_weights[[word1_idx]], embedding_weights[[word2_idx]]
                model_similarity.append(float(sklearn.metrics.pairwise.cosine_similarity(word1_embed, word2_embed)))       # similarity computed by the model
                human_similarity.append(float(data.iloc[i, 2]))                                                            # human-annotated similarity

        return scipy.stats.spearmanr(human_similarity, model_similarity)            # Spearman rank correlation between the two sets of similarities

    def find_nearest(word):
        index = word_to_idx[word]
        embedding = embedding_weights[index]
        cos_dis = np.array([scipy.spatial.distance.cosine(e, embedding) for e in embedding_weights])   # cosine distance to every word in the vocabulary
        return [idx_to_word[i] for i in cos_dis.argsort()[:10]]                                        # the 10 nearest words (the word itself comes first)
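
    For reference, the cosine similarity used in evaluate and the cosine distance used in find_nearest are related as follows, and evaluate reports the Spearman rank correlation between the model similarities and the human scores:

    $$\mathrm{cos\_sim}(a, b) = \frac{a \cdot b}{\lVert a \rVert\,\lVert b \rVert}, \qquad \mathrm{cos\_dist}(a, b) = 1 - \mathrm{cos\_sim}(a, b)$$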

    5. Train the model

    • The model usually needs to be trained for several epochs.
    • In each epoch, all the data is split into batches.
    • The inputs and outputs of each batch are wrapped as cuda tensors.
    • Forward pass: run the model on the center words, their context words, and the negative samples.
    • The loss is the negative-sampling loss returned by the model, averaged over the batch.
    • Zero the model's current gradients.
    • Backward pass.
    • Update the model parameters.
    • Every fixed number of iterations, log the loss at the current iteration and evaluate the model on the validation datasets.
    optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)   # stochastic gradient descent

    for e in range(NUM_EPOCHS):   # training loop
        for i, (input_labels, pos_labels, neg_labels) in enumerate(dataloader):

            input_labels = input_labels.long()   # LongTensor
            pos_labels = pos_labels.long()
            neg_labels = neg_labels.long()
            if USE_CUDA:
                input_labels = input_labels.cuda()
                pos_labels = pos_labels.cuda()
                neg_labels = neg_labels.cuda()

            optimizer.zero_grad()   # zero the gradients
            loss = model(input_labels, pos_labels, neg_labels).mean()
            loss.backward()
            optimizer.step()

            # log progress
            if i % 100 == 0:
                with open(LOG_FILE, "a") as fout:
                    fout.write("epoch: {}, iter: {}, loss: {}\n".format(e, i, loss.item()))
                    print("epoch: {}, iter: {}, loss: {}".format(e, i, loss.item()))

            if i % 2000 == 0:
                embedding_weights = model.input_embeddings()
                sim_simlex = evaluate("sample_data/simlex-999.txt", embedding_weights)
                sim_men = evaluate("sample_data/men.txt", embedding_weights)
                sim_353 = evaluate("sample_data/wordsim353.csv", embedding_weights)
                with open(LOG_FILE, "a") as fout:
                    print("epoch: {}, iteration: {}, simlex-999: {}, men: {}, sim353: {}, nearest to monster: {}\n".format(
                        e, i, sim_simlex, sim_men, sim_353, find_nearest("monster")))
                    fout.write("epoch: {}, iteration: {}, simlex-999: {}, men: {}, sim353: {}, nearest to monster: {}\n".format(
                        e, i, sim_simlex, sim_men, sim_353, find_nearest("monster")))

        embedding_weights = model.input_embeddings()
        np.save("embedding-{}".format(EMBEDDING_SIZE), embedding_weights)
        torch.save(model.state_dict(), "embedding-{}.th".format(EMBEDDING_SIZE))

    Training is too heavy to run here, so the training output is omitted for now.

    Load the saved model state

    model.load_state_dict(torch.load("embedding-{}.th".format(EMBEDDING_SIZE)))

    6. Evaluate on the MEN, SimLex-999, and WordSim353 datasets

    embedding_weights = model.input_embeddings()
    print("simlex-999", evaluate("simlex-999.txt", embedding_weights))
    print("men", evaluate("men.txt", embedding_weights))
    print("wordsim353", evaluate("wordsim353.csv", embedding_weights))

    simlex-999 SpearmanrResult(correlation=0.17251697429101504, pvalue=7.863946056740345e-08)

    men SpearmanrResult(correlation=0.1778096817088841, pvalue=7.565661657312768e-20)

    wordsim353 SpearmanrResult(correlation=0.27153702278146635, pvalue=8.842165885381714e-07)

    7. Find nearest neighbors

    for word in ["good", "fresh", "monster", "green", "like", "america", "chicago", "work", "computer", "language"]:
        print(word, find_nearest(word))

    good ['good', 'bad', 'perfect', 'hard', 'questions', 'alone', 'money', 'false', 'truth', 'experience']

    fresh ['fresh', 'grain', 'waste', 'cooling', 'lighter', 'dense', 'mild', 'sized', 'warm', 'steel']

    monster ['monster', 'giant', 'robot', 'hammer', 'clown', 'bull', 'demon', 'triangle', 'storyline', 'slogan']

    green ['green', 'blue', 'yellow', 'white', 'cross', 'orange', 'black', 'red', 'mountain', 'gold']

    like ['like', 'unlike', 'etc', 'whereas', 'animals', 'soft', 'amongst', 'similarly', 'bear', 'drink']

    america ['america', 'africa', 'korea', 'india', 'australia', 'turkey', 'pakistan', 'mexico', 'argentina', 'carolina']

    chicago ['chicago', 'boston', 'illinois', 'texas', 'london', 'indiana', 'massachusetts', 'florida', 'berkeley', 'michigan']

    work ['work', 'writing', 'job', 'marx', 'solo', 'label', 'recording', 'nietzsche', 'appearance', 'stage']

    computer ['computer', 'digital', 'electronic', 'audio', 'video', 'graphics', 'hardware', 'software', 'computers', 'program']

    language ['language', 'languages', 'alphabet', 'arabic', 'grammar', 'pronunciation', 'dialect', 'programming', 'chinese', 'spelling']

    8. Word analogies

    man_idx = word_to_idx["man"]
    king_idx = word_to_idx["king"]
    woman_idx = word_to_idx["woman"]
    embedding = embedding_weights[woman_idx] - embedding_weights[man_idx] + embedding_weights[king_idx]   # analogy vector: woman - man + king
    cos_dis = np.array([scipy.spatial.distance.cosine(e, embedding) for e in embedding_weights])          # cosine distance from every word to the analogy vector
    for i in cos_dis.argsort()[:20]:                                                                      # the 20 nearest words
        print(idx_to_word[i])
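
    The analogy vector computed above is

    $$\hat{v} = v_{\text{woman}} - v_{\text{man}} + v_{\text{king}}$$

    and the loop prints the 20 words whose embeddings are closest to it in cosine distance.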

    king henry charles pope queen iii prince elizabeth alexander constantine edward son iv louis emperor mary james joseph frederick francis

  • Original post: https://www.cnblogs.com/cxq1126/p/13435983.html