  • Pytorch_3.6_ Implementing Softmax Regression from Scratch

    Implementing softmax regression by hand.

    import torch
    import torchvision
    import numpy as np
    import xiaobei_pytorch as xb
    

    3.6.1 Obtaining the data

    batch_size = 256
    train_iter,test_iter = xb.load_data_fashion_mnist(batch_size=batch_size)
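    The helper xb.load_data_fashion_mnist comes from the companion module xiaobei_pytorch and is not reproduced in this section. A minimal sketch of what such a loader typically looks like with torchvision (the root path is an assumption):

    import torchvision.transforms as transforms

    def load_data_fashion_mnist(batch_size, root='./Datasets/FashionMNIST'):
        # download Fashion-MNIST and convert each image to a (1, 28, 28) float tensor
        transform = transforms.ToTensor()
        mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
        mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)
        # wrap the datasets in DataLoaders that yield mini-batches of batch_size examples
        train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
        test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False)
        return train_iter, test_iter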
    

    3.6.2 Initializing the model parameters

    The Fashion-MNIST inputs are images of 28 × 28 = 784 pixels and there are 10 output classes, so the output layer of this single-layer network has 10 units; the softmax weights and bias are therefore a 784 × 10 matrix and a 1 × 10 row vector.

    # inputs and outputs
    num_inputs = 784
    num_outputs = 10
    
    # weights and bias
    W = torch.tensor(np.random.normal(0,0.01,(num_inputs,num_outputs)),dtype=torch.float)
    b = torch.zeros(num_outputs,dtype = torch.float)
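    As an aside, the same initialization can be done directly in PyTorch without the NumPy detour; a minimal equivalent sketch (this form also sets requires_grad up front, which would make the separate calls below unnecessary):

    W = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)
    b = torch.zeros(num_outputs, requires_grad=True)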
    
    
    

    Enable gradient tracking for the parameters

    W.requires_grad_(requires_grad = True)
    b.requires_grad_(requires_grad = True)
    
    tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], requires_grad=True)
    

    3.6.3 Operating on tensors along a dimension

    We can sum the elements of a matrix along its rows or columns by passing dim=0 or dim=1: dim=0 sums down each column, dim=1 sums across each row, and keepdim=True keeps the reduced dimension.

    X = torch.tensor([[1,2,3],[4,5,6]])
    print(X.sum(dim = 0, keepdim = True))
    print(X.sum(dim = 1, keepdim = True))
    
    tensor([[5, 7, 9]])
    tensor([[ 6],
            [15]])
    
    def softmax(X):
        # exponentiate every element, then normalize each row so it sums to 1
        X_exp = X.exp()
        partition = X_exp.sum(dim = 1, keepdim=True)
        return X_exp / partition
    
    
    X = torch.rand((2,5))
    # y = torch.rand(2,2)
    print(X)
    X_prob = softmax(X)
    print(X_prob,X_prob.sum(dim = 1))
    
    
    tensor([[0.7006, 0.1504, 0.8269, 0.8514, 0.3227],
            [0.4950, 0.9123, 0.5274, 0.6243, 0.6404]])
    tensor([[0.2193, 0.1265, 0.2489, 0.2550, 0.1503],
            [0.1711, 0.2597, 0.1767, 0.1947, 0.1979]]) tensor([1.0000, 1.0000])
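    Exponentiating large logits can overflow in exp(). The from-scratch version above keeps things simple; a common remedy, shown here only as a sketch, is to subtract each row's maximum before exponentiating:

    def softmax_stable(X):
        # subtracting the row-wise max leaves the result unchanged mathematically
        # (the factor cancels in the ratio) but keeps exp() from overflowing
        X = X - X.max(dim=1, keepdim=True).values
        X_exp = X.exp()
        return X_exp / X_exp.sum(dim=1, keepdim=True)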
    

    3.6.4 Defining the model

    Flatten each image into a one-dimensional vector, multiply it by the weight matrix W, and add the bias b.

    def net(X):
        # view() reshapes each batch of images to (batch_size, num_inputs);
        # torch.mm is matrix multiplication
        f_x = torch.mm(X.view((-1, num_inputs)), W) + b
        return softmax(f_x)
    
    

    3.6.5 Defining the loss function

    y_hat = torch.tensor([[0.1,0.3,0.6],[0.3,0.2,0.5]])
    y = torch.LongTensor([0,2])
    y_hat.gather(1,y.view(-1,1))
    
    
    tensor([[0.1000],
            [0.5000]])
    
    def cross_entropy(y_hat, y):
        # gather picks out the predicted probability of each example's true class
        return -torch.log(y_hat.gather(1, y.view(-1, 1)))
    
    
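    Since y_hat already holds probabilities rather than logits, this hand-written loss should agree with PyTorch's built-in negative log-likelihood applied to the log-probabilities; a quick sanity-check sketch:

    import torch.nn.functional as F

    # nll_loss expects log-probabilities; reduction='none' returns per-example losses
    print(cross_entropy(y_hat, y).view(-1))
    print(F.nll_loss(torch.log(y_hat), y, reduction='none'))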

    3.6.6 Computing classification accuracy

    a = torch.randn(3,5)
    print(a)
    print(a.argmax(dim=1))
    
    def accuracy(y_hat, y):
        # y_hat is the predicted probability distribution, y the true labels
        # argmax(dim=1) gives the index of the largest value in each row
        return ((y_hat.argmax(dim=1)==y).float().mean().item())
    
    
    tensor([[-1.7017, -0.2468,  0.5864, -0.7538, -1.5446],
            [-0.1572, -0.1219,  0.0282, -0.7416, -0.5916],
            [ 0.2229,  1.2182, -2.1934, -0.3435,  1.4544]])
    tensor([2, 2, 4])
    
    print(accuracy(y_hat,y))  # only the second example above is classified correctly, hence 0.5
    
    
    0.5
    
    def evaluate_accuracy(data_iter,net):
        acc_sum,n = 0.0,0
        for X,y in data_iter:
            # each X drawn from the iterator is a mini-batch of 256 images
            acc_sum += (net(X).argmax(dim=1)==y).float().sum().item()
            n += y.shape[0]
        return acc_sum/n
    
    
    # classifying the test set with the randomly initialized net should give an accuracy of roughly 1/10 = 0.1
    print(evaluate_accuracy(test_iter,net))
    
    
    0.077
    

    3.6.7 Training the model

    • So far net is only a randomly initialized model; the following function trains it.
    def train_ch3(net, train_iter,test_iter,loss,num_epochs,batch_size,params=None,lr=None,optimizer = None):
        for epoch in range(num_epochs):
            # one full pass over the training data per epoch (num_epochs = 5 below)
            train_l_num, train_acc_num,n = 0.0,0.0,0
            for X,y in train_iter:
                # X is a mini-batch of 256 images (28*28 each); y holds their labels
                # forward pass: softmax probabilities for X, then the loss value
                y_hat = net(X) 
                l = loss(y_hat,y).sum()
                
                # zero the gradients
                if optimizer is not None:
                    optimizer.zero_grad()
                elif params is not None and params[0].grad is not None:
                    for param in params:
                        param.grad.data.zero_()
                l.backward()
                if optimizer is None:
                    xb.sgd(params,lr,batch_size)
                else:
                    optimizer.step()
                train_l_num += l.item()
                train_acc_num += (y_hat.argmax(dim=1)==y).sum().item()
                n+= y.shape[0]
            test_acc = evaluate_accuracy(test_iter,net)
            print('epoch %d, loss %.4f,train_acc %.3f,test_acc %.3f'%(epoch+1,train_l_num/n, train_acc_num/n, test_acc))
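    The optimizer helper xb.sgd is also taken from the companion module; assuming it is the same mini-batch SGD used in the earlier from-scratch linear regression section, it presumably looks like this sketch:

    def sgd(params, lr, batch_size):
        # mini-batch SGD: the loss above is a sum over the batch,
        # so divide by batch_size to keep the step size independent of the batch size
        for param in params:
            param.data -= lr * param.grad / batch_size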
    
    num_epochs ,lr = 5,0.1
    train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs,batch_size, [W, b], lr)
    
    epoch 1, loss 0.4351,train_acc 0.852,test_acc 0.836
    epoch 2, loss 0.4333,train_acc 0.852,test_acc 0.824
    epoch 3, loss 0.4303,train_acc 0.853,test_acc 0.838
    epoch 4, loss 0.4275,train_acc 0.855,test_acc 0.839
    epoch 5, loss 0.4257,train_acc 0.855,test_acc 0.839
    

    3.6.8 Prediction

    X, y = next(iter(test_iter))
    
    true_labels = xb.get_fashion_mnist_labels(y.numpy())
    pred_labels = xb.get_fashion_mnist_labels(net(X).argmax(dim = 1).numpy())
    titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]
    xb.show_fashion_mnist(X[10:19],titles[10:19])
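    The label and plotting helpers also come from xiaobei_pytorch; assuming they follow the book, the label lookup is just an index-to-name mapping, roughly:

    def get_fashion_mnist_labels(labels):
        # map numeric class indices (0-9) to the Fashion-MNIST category names
        text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                       'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
        return [text_labels[int(i)] for i in labels]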
    
