  • A simple PyTorch test

    pytorch demo

    import torch.nn as nn
    import torch.nn.functional as F
    from torch.autograd import Variable
    import torch
    import torch.optim as optim
    
    class Net(nn.Module):                     # a network must subclass nn.Module
        def __init__(self):
            super(Net, self).__init__()       # two convolutional layers, self.conv1 and self.conv2; note that these layers contain no activation function
            self.conv1 = nn.Conv2d(1, 6, 5)   # 1 input image channel, 6 output channels, 5x5 square convolution kernel
            self.conv2 = nn.Conv2d(6, 16, 5)  # followed by three fully connected layers
            self.fc1 = nn.Linear(16*5*5, 120) # an affine operation: y = Wx + b
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 10)
        def forward(self, x):                 # note: the input to a 2D conv layer has shape batchsize*channel*height*width
            x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # Max pooling over a (2, 2) window
            x = F.max_pool2d(F.relu(self.conv2(x)), 2)       # If the size is a square you can only specify a single number
            x = x.view(-1, self.num_flat_features(x))
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x
        def num_flat_features(self, x):
            size = x.size()[1:] # all dimensions except the batch dimension
            num_features = 1
            for s in size:
                num_features *= s
            return num_features
    
    
    # net = Net()
    # print(net)
    # print(len(list(net.parameters())))
    #
    # input = Variable(torch.randn(1, 1, 32, 32))
    # out = net(input)
    
    net = Net()
    optimizer = optim.SGD(net.parameters(), lr=0.01)    # create your optimizer
    
    input_data = torch.randn(2, 1, 32, 32)
    # input_data = Variable(input_data)
    target = torch.FloatTensor(2, 10).random_(8)        # random integer targets in [0, 8)
    print(target)
    criterion = torch.nn.MSELoss()                      # mean squared error; reduce/size_average are deprecated, the default (mean reduction) is equivalent
    # in your training loop:
    for i in range(1000):
        optimizer.zero_grad()      # zero the gradient buffers; gradients accumulate if they are not reset (see the short check after the output below)
        output = net(input_data)   # the graph is built dynamically here; you could even pass extra arguments to change the network structure
    
        loss = criterion(output, target)
        loss.backward()            # compute the gradients, i.e. fill in .grad on each parameter
        optimizer.step()           # apply the update
    
    print(output)
    

    output

    tensor([[1., 3., 4., 3., 5., 1., 6., 6., 6., 6.],
            [1., 2., 2., 7., 2., 4., 0., 4., 3., 6.]])
    
    tensor([[1.0419, 3.0951, 4.0900, 3.2657, 5.1304, 1.1834, 6.0200, 6.1616, 6.1678,
             6.2592],
            [0.9804, 2.0937, 2.2189, 6.6986, 2.2809, 3.8273, 0.5658, 4.1855, 3.3320,
             6.0890]], grad_fn=<ThAddmmBackward>)
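
    Why does fc1 take 16*5*5 input features? With a 32x32 input, each 5x5 convolution (no padding) shrinks the feature map by 4 pixels per dimension and each 2x2 max-pool halves it: 32 → 28 → 14 → 10 → 5, leaving 16 channels of 5x5. A minimal standalone check of that shape flow, assuming the same layer sizes as above:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    
    # trace the spatial sizes: 32 -conv5-> 28 -pool2-> 14 -conv5-> 10 -pool2-> 5
    x = torch.randn(1, 1, 32, 32)
    x = F.max_pool2d(F.relu(nn.Conv2d(1, 6, 5)(x)), 2)
    print(x.shape)   # torch.Size([1, 6, 14, 14])
    x = F.max_pool2d(F.relu(nn.Conv2d(6, 16, 5)(x)), 2)
    print(x.shape)   # torch.Size([1, 16, 5, 5]) -> flattened to 16*5*5 for fc1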
    

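    As the zero_grad() comment in the training loop notes, PyTorch adds new gradients into .grad on every backward() call. A small standalone check of that behaviour, using a toy scalar tensor that is not part of the demo above:

    import torch
    
    w = torch.tensor(1.0, requires_grad=True)
    (2 * w).backward()
    print(w.grad)        # tensor(2.)
    (2 * w).backward()   # without zeroing, the new gradient is added to the old one
    print(w.grad)        # tensor(4.)
    w.grad.zero_()       # this is what optimizer.zero_grad() does for every parameter
    (2 * w).backward()
    print(w.grad)        # tensor(2.)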
    Linear regression

    import torch
    from torch.autograd import Variable
    
    # train data
    x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0]]))
    y_data = Variable(torch.Tensor([[2.0], [4.0], [6.0]]))
    
    class Model(torch.nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.linear = torch.nn.Linear(1, 1)
            # One in and one out
        def forward(self, x):
            y_pred = self.linear(x)
            return y_pred
    
    # our model
    model = Model()
    
    criterion = torch.nn.MSELoss(reduction='sum')   # size_average=False is deprecated; reduction='sum' is equivalent
    # Defined loss function
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # Defined optimizer
    #  Training: forward, loss, backward, step
    #  Training loop
    for epoch in range(500):
        #  Forward pass
        y_pred = model(x_data)
        # Compute loss
        loss = criterion(y_pred, y_data)
        print(epoch, loss.item())   # loss.data[0] fails on 0-dim tensors in PyTorch >= 0.4; .item() is the portable form
        # Zero gradients
        optimizer.zero_grad()
        # perform backward pass
        loss.backward()
        # update weights
        optimizer.step()
    
    # After training
    
    hour_var = Variable(torch.Tensor([[7.0]]))
    print("predict (after training)", 4, model.forward(hour_var).data[0][0])
    

    Logistic regression

    import torch
    from torch.autograd import Variable
    
    x_data = Variable(torch.Tensor([[0.4], [1.0], [3.5], [4.0]]))
    y_data = Variable(torch.Tensor([[0.], [0.], [1.], [1.]]))
    
    class Model(torch.nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.linear = torch.nn.Linear(1, 1)
            # One in one out
            self.sigmoid = torch.nn.Sigmoid()
    
        def forward(self, x):
            y_pred = self.sigmoid(self.linear(x))
            return y_pred
    
    # Our model
    model = Model()
    # Construct loss function and optimizer
    criterion = torch.nn.BCELoss()   # size_average=True is deprecated; the default (mean reduction) is equivalent
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # Training loop
    for epoch in range(80000):
        # Forward pass
        y_pred = model(x_data)
        # Compute loss
        loss = criterion(y_pred, y_data)
        if epoch % 20 == 0:
            print(epoch, loss.item())
        # Zero gradients: the update must run every epoch, not only when the loss is printed
        optimizer.zero_grad()
        # Backward pass
        loss.backward()
        # Update weights
        optimizer.step()
    
    # After training
    
    hour_var = Variable(torch.Tensor([[0.5]]))
    print("predict (after training)", 0.5, model(hour_var).item())
    hour_var = Variable(torch.Tensor([[7.0]]))
    print("predict (after training)", 7.0, model(hour_var).item())
    
    

    Notes

    In PyTorch 0.4 Tensor and Variable were merged, so tensors can take part in autograd computations directly; in older versions a tensor had to be wrapped in a Variable before gradients could be computed. Both styles are sketched below.
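
    A minimal illustration of the two styles (the Variable wrapper still works in recent versions but is deprecated):

    import torch
    from torch.autograd import Variable
    
    # PyTorch >= 0.4: a plain tensor can track gradients directly
    x = torch.ones(2, 2, requires_grad=True)
    x.sum().backward()
    print(x.grad)
    
    # pre-0.4 style: wrap the tensor in a Variable before computing gradients
    y = Variable(torch.ones(2, 2), requires_grad=True)
    y.sum().backward()
    print(y.grad)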

  • Original post: https://www.cnblogs.com/o-v-o/p/9975358.html