  • Simple Applications of RNN and LSTM in PyTorch

    Using an RNN for a regression task

    import torch
    from torch import nn
    import numpy as np
    import matplotlib.pyplot as plt
    
    # torch.manual_seed(1)    # reproducible
    
    # Hyper Parameters
    TIME_STEP = 10      # rnn time step
    INPUT_SIZE = 1      # rnn input size
    LR = 0.02           # learning rate
    
    # show data
    steps = np.linspace(0, np.pi*2, 100, dtype=np.float32)  # float32 so the array converts to a torch FloatTensor
    x_np = np.sin(steps)
    y_np = np.cos(steps)
    plt.plot(steps, y_np, 'r-', label='target (cos)')
    plt.plot(steps, x_np, 'b-', label='input (sin)')
    plt.legend(loc='best')
    plt.show()
    
    
    class RNN(nn.Module):
        def __init__(self):
            super(RNN, self).__init__()
    
            self.rnn = nn.RNN(
                input_size=INPUT_SIZE,
                hidden_size=32,     # rnn hidden unit
                num_layers=1,       # number of rnn layer
            batch_first=True,   # input & output tensors have batch size as the first dimension, e.g. (batch, time_step, input_size)
            )
            self.out = nn.Linear(32, 1)
    
        def forward(self, x, h_state):
            # x (batch, time_step, input_size)
            # h_state (n_layers, batch, hidden_size)
            # r_out (batch, time_step, hidden_size)
            r_out, h_state = self.rnn(x, h_state)
    
            outs = []    # save all predictions
            for time_step in range(r_out.size(1)):    # calculate output for each time step
                outs.append(self.out(r_out[:, time_step, :]))
            return torch.stack(outs, dim=1), h_state
    
        # Instead, for simplicity, you can replace the loop above with the following:
        # r_out = r_out.view(-1, 32)
        # outs = self.out(r_out)
        # outs = outs.view(-1, TIME_STEP, 1)
        # return outs, h_state

        # Or even simpler: nn.Linear operates on the last dimension only and
        # leaves all leading dimensions unchanged, so it can be applied to the
        # 3-D r_out directly (see the sanity check after this script):
        # outs = self.out(r_out)
        # return outs, h_state
    
    rnn = RNN()
    print(rnn)
    
    optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all rnn parameters
    loss_func = nn.MSELoss()
    
    h_state = None      # for initial hidden state
    
    plt.figure(1, figsize=(12, 5))
    plt.ion()           # continuously plot
    
    for step in range(100):
        start, end = step * np.pi, (step+1)*np.pi   # time range
        # use sin to predict cos
        steps = np.linspace(start, end, TIME_STEP, dtype=np.float32, endpoint=False)  # float32 so the array converts to a torch FloatTensor
        x_np = np.sin(steps)
        y_np = np.cos(steps)
    
        x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])    # shape (batch, time_step, input_size)
        y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
    
        prediction, h_state = rnn(x, h_state)   # rnn output
        # !! next step is important !!
        h_state = h_state.detach()    # repack the hidden state: cut the graph link to the last iteration
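        # Without the detach, the graph from the previous window would be
        # reused: backward() would fail on the second iteration because the
        # buffers of the earlier graph are freed after its backward pass.
        # Detaching implements truncated backpropagation through time:
        # gradients flow within one TIME_STEP window, not across windows.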
    
        loss = loss_func(prediction, y)         # calculate loss
        optimizer.zero_grad()                   # clear gradients for this training step
        loss.backward()                         # backpropagation, compute gradients
        optimizer.step()                        # apply gradients
    
        # plotting
        plt.plot(steps, y_np.flatten(), 'r-')
        plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
        plt.draw(); plt.pause(0.05)
    
    plt.ioff()
    plt.show()
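
    As a quick sanity check of the simpler alternative mentioned in forward() above, here is a minimal sketch (not part of the original post) verifying that applying nn.Linear to the full 3-D r_out gives the same result as the per-time-step loop:

    import torch
    from torch import nn

    rnn_layer = nn.RNN(input_size=1, hidden_size=32, num_layers=1, batch_first=True)
    linear = nn.Linear(32, 1)

    x = torch.randn(1, 10, 1)            # (batch, time_step, input_size)
    r_out, h_state = rnn_layer(x, None)  # r_out shape: (1, 10, 32)

    # loop version: one projection per time step, stacked along the time dimension
    looped = torch.stack([linear(r_out[:, t, :]) for t in range(r_out.size(1))], dim=1)
    # vectorized version: nn.Linear acts on the last dimension only
    vectorized = linear(r_out)

    print(torch.allclose(looped, vectorized))  # True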
    

    Using an LSTM for a classification task

    import torch
    from torch import nn
    import torchvision.datasets as dsets
    import torchvision.transforms as transforms
    import matplotlib.pyplot as plt
    
    
    # torch.manual_seed(1)    # reproducible
    
    # Hyper Parameters
    EPOCH = 1               # number of passes over the training data; just 1 epoch to save time
    BATCH_SIZE = 64
    TIME_STEP = 28          # rnn time step / image height
    INPUT_SIZE = 28         # rnn input size / image width
    LR = 0.01               # learning rate
    DOWNLOAD_MNIST = True   # set to True if you haven't downloaded the data yet
    
    
    # Mnist digital dataset
    train_data = dsets.MNIST(
        root='./mnist/',
        train=True,                         # this is training data
        transform=transforms.ToTensor(),    # converts a PIL.Image or numpy.ndarray to a torch.FloatTensor
                                            # of shape (C x H x W) and normalizes values to the range [0.0, 1.0]
        download=DOWNLOAD_MNIST,            # download it if you don't have it
    )
    
    # plot one example (.data and .targets replace the deprecated
    # .train_data and .train_labels attributes)
    print(train_data.data.size())        # torch.Size([60000, 28, 28])
    print(train_data.targets.size())     # torch.Size([60000])
    plt.imshow(train_data.data[0].numpy(), cmap='gray')
    plt.title('%i' % train_data.targets[0])
    plt.show()
    
    # Data Loader for easy mini-batch return in training
    train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
    
    # convert test data into tensors, pick 2000 samples to speed up testing
    test_data = dsets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor())
    test_x = test_data.data.type(torch.FloatTensor)[:2000]/255.   # shape (2000, 28, 28), values in [0, 1]; ToTensor normalizes the training batches automatically, but these raw test tensors are not normalized, so divide by 255 by hand or the reported test accuracy is meaningless
    test_y = test_data.targets.numpy()[:2000]    # convert to a numpy array
    
    
    class RNN(nn.Module):
        def __init__(self):
            super(RNN, self).__init__()
    
            self.rnn = nn.LSTM(         # if you use nn.RNN() here, it hardly learns
                input_size=INPUT_SIZE,
                hidden_size=64,         # rnn hidden unit
                num_layers=1,           # number of rnn layer
                batch_first=True,       # input & output tensors have batch size as the first dimension, e.g. (batch, time_step, input_size)
            )
    
            self.out = nn.Linear(64, 10)
    
        def forward(self, x):
            # x shape (batch, time_step, input_size)
            # r_out shape (batch, time_step, hidden_size)
            # h_n shape (n_layers, batch, hidden_size) -- final hidden state
            # c_n shape (n_layers, batch, hidden_size) -- final cell state
            r_out, (h_n, c_n) = self.rnn(x, None)   # None means zero initial hidden and cell states
    
            # choose r_out at the last time step
            out = self.out(r_out[:, -1, :])
            return out
    
    
    rnn = RNN()
    print(rnn)
    
    optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all rnn parameters
    loss_func = nn.CrossEntropyLoss()                       # the target label is not one-hotted
    
    # training and testing
    for epoch in range(EPOCH):
        for step, (b_x, b_y) in enumerate(train_loader):        # gives batch data
            b_x = b_x.view(-1, 28, 28)              # reshape x to (batch, time_step, input_size)
    
            output = rnn(b_x)                               # rnn output
            loss = loss_func(output, b_y)                   # cross entropy loss
            optimizer.zero_grad()                           # clear gradients for this training step
            loss.backward()                                 # backpropagation, compute gradients
            optimizer.step()                                # apply gradients
    
            if step % 50 == 0:
                test_output = rnn(test_x)                   # (samples, time_step, input_size)
                pred_y = torch.max(test_output, 1)[1].data.numpy()
                accuracy = float((pred_y == test_y).astype(int).sum()) / float(test_y.size)
                print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)
    
    # print 10 predictions from test data
    test_output = rnn(test_x[:10].view(-1, 28, 28))
    pred_y = torch.max(test_output, 1)[1].data.numpy()
    print(pred_y, 'prediction number')
    print(test_y[:10], 'real number')
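
    A note on r_out[:, -1, :]: for a single-layer, unidirectional LSTM, the output at the last time step is exactly the final hidden state h_n of that layer, so taking the last slice and taking h_n[-1] are interchangeable. A minimal sketch (not part of the original post) checking this:

    import torch
    from torch import nn

    lstm = nn.LSTM(input_size=28, hidden_size=64, num_layers=1, batch_first=True)
    x = torch.randn(4, 28, 28)          # (batch, time_step, input_size)
    r_out, (h_n, c_n) = lstm(x, None)   # h_n shape: (num_layers, batch, hidden_size)

    # the last-time-step output slice equals the final hidden state of the layer
    print(torch.allclose(r_out[:, -1, :], h_n[-1]))  # True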
    
    


  • Original post: https://www.cnblogs.com/lokvahkoor/p/12263953.html