  • Loss functions

    import torch
    from torch import nn
    import torch.nn.functional as F
    
    a = torch.tensor([[1,2,3],[4,5,6]])
    torch.sum(a)     #tensor(21)
    per_col_sum = torch.sum(a, dim=0)    #tensor([5, 7, 9])  collapses the rows: one sum per column
    print("per_col_sum=",per_col_sum)
    per_row_sum = torch.sum(a, dim=1)    #tensor([ 6, 15])   collapses the columns: one sum per row
    print("per_row_sum=",per_row_sum)
    print(per_row_sum.reshape(-1,1))
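
    As a side note, passing keepdim=True keeps the reduced axis as size 1, which avoids the manual reshape above; a minimal sketch:

    # same result as per_row_sum.reshape(-1, 1), without the extra step
    print(torch.sum(a, dim=1, keepdim=True))  # tensor([[ 6], [15]])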

    1. L1Loss: absolute error

    loss(x_i, y_i) = |x_i - y_i|

    input_data = torch.Tensor([[4], [5], [6]])  # torch.Tensor gives a FloatTensor; torch.tensor infers dtype (integer data here would give a LongTensor)
    true_data = torch.Tensor([[3], [6], [8]])
    print("input_data=", input_data)
    print("true_data=", true_data)
    
    L1Loss_function_default = nn.L1Loss()
    loss_default = L1Loss_function_default(input_data, true_data)
    
    L1Loss_function_none = nn.L1Loss(reduction="none")
    loss_none = L1Loss_function_none(input_data, true_data)
    
    L1Loss_function_sum = nn.L1Loss(reduction="sum")
    loss_sum = L1Loss_function_sum(input_data, true_data)
    
    L1Loss_function_mean = nn.L1Loss(reduction="mean")  # "mean" is the current name; "elementwise_mean" is deprecated
    loss_mean = L1Loss_function_mean(input_data, true_data)
    print("---------------------------------L1 loss------------------------------")
    print("loss_default=", loss_default)
    print("loss_none", loss_none)
    print("loss_sum=",loss_sum)
    print("loss_elementwise_mean=", loss_elementwise_mean)
    # verify by hand: mean of |y - f(x)|
    print((abs(4-3)+abs(5-6)+abs(6-8))/3)  # 1.3333
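
    The same loss is also exposed functionally through torch.nn.functional; a quick sketch using the F imported above:

    print(F.l1_loss(input_data, true_data))                   # default reduction="mean" -> tensor(1.3333)
    print(F.l1_loss(input_data, true_data, reduction="sum"))  # tensor(4.)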

    2. MSELoss (L2 loss): mean squared error

    loss(x_i, y_i) = (x_i - y_i)^2

    MSELoss_function = nn.MSELoss()
    loss = MSELoss_function(input_data, true_data)
    print("---------------L2 loss-------------------")
    print(loss)
    # verify by hand: mean of (y - f(x))**2
    print(((4-3)**2 + (5-6)**2 + (6-8)**2)/3)  # 2.0
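
    The same check in tensor form rather than by hand:

    # mean of elementwise squared differences, matching nn.MSELoss()
    print(torch.mean((input_data - true_data) ** 2))  # tensor(2.)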

    3. SmoothL1Loss

    # SmoothL1Loss: 0.5*(y - f(x))**2 / beta if |y - f(x)| < beta, else |y - f(x)| - 0.5*beta  (beta defaults to 1)
    print("---------------- SmoothL1Loss-------------")
    smoothL1Loss_function = nn.SmoothL1Loss()
    loss = smoothL1Loss_function(input_data, true_data)
    print(loss)
    # verify by hand: errors are 1, 1, 2; at |error| = 1 the two branches agree (both give 0.5)
    print( (0.5*(4-3)**2 + 0.5*(5-6)**2 + abs(6-8)-0.5)/3 )  # 0.8333
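
    A minimal manual sketch of the same piecewise rule using torch.where, assuming the default beta = 1:

    diff = torch.abs(input_data - true_data)
    beta = 1.0
    # quadratic branch below beta, linear branch at or above it
    smooth_l1 = torch.where(diff < beta, 0.5 * diff ** 2 / beta, diff - 0.5 * beta)
    print(smooth_l1.mean())  # tensor(0.8333), matching nn.SmoothL1Loss()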

    4. NLLLoss

    
    
    print("-----------------NLLLoss--------------------------")#负对数似然损失
    input_data = torch.Tensor([[1, 2, 3], [4, 5, 6], [2, 4, 6]])
    logsoftmax = F.log_softmax(input_data, dim=1)
    print("logsoftmax=",logsoftmax)

    label = torch.LongTensor([0,2,1])
    nll_loss_function = nn.NLLLoss()
    loss = nll_loss_function(logsoftmax, label)  # -1/N * sum(one_hot(y) * log_softmax(x))
    print(loss)


    ## verify by hand
    one_hot = F.one_hot(label).float()
    print("one_hot=",one_hot)
    print(input_data)
    print(torch.exp(input_data))
    print(torch.sum(torch.exp(input_data), dim=1))
    print(torch.sum(torch.exp(input_data), dim=1).reshape(-1,1))
    sm = torch.exp(input_data)/torch.sum(torch.exp(input_data), dim=1).reshape(-1,1)  # softmax by hand: each row sums to 1

    logsm = torch.log(sm)
    print("logsm=",logsm)
    print("one_hot*logsm=", one_hot*logsm)
    nll_loss = -torch.sum(one_hot*logsm)/label.shape[0]
    print(nll_loss)


    sm = nn.Softmax(dim=1)  # each row sums to 1
    temp = torch.log(sm(input_data))
    #print(temp)
    print(-(temp[0][0]+temp[1][2]+temp[2][1])/3)
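
    Instead of one-hot masking, torch.gather can pick out the log-probability of each true class directly; a minimal sketch:

    picked = logsoftmax.gather(1, label.reshape(-1, 1))  # log-probability of the true class, shape (3, 1)
    print(-picked.mean())  # same value as nll_loss_function above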
     

    5. CrossEntropyLoss

    For a single sample in the binary case, the cross-entropy loss is L(y, y_hat) = -[ y * log(y_hat) + (1-y) * log(1-y_hat) ], where y is the true label and y_hat the prediction. nn.CrossEntropyLoss implements the multi-class form and is equivalent to LogSoftmax followed by NLLLoss, so it takes raw logits rather than probabilities.

    print("-------------------- CrossEntropyLoss-------------------------")
    cross_entropy_loss_function = nn.CrossEntropyLoss()
    loss = cross_entropy_loss_function(input_data, label)
    print(loss)
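
    Since CrossEntropyLoss fuses LogSoftmax and NLLLoss, a quick check against the objects defined above confirms the equivalence:

    # equals the CrossEntropyLoss value just printed
    print(nll_loss_function(F.log_softmax(input_data, dim=1), label))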

    6. BCELoss

    # BCELoss requires its inputs to lie in (0, 1), so pass the raw scores through a sigmoid first
    print("---------------------BCELoss-----------------------------------")
    bce_loss_function = nn.BCELoss()
    sig = nn.Sigmoid()
    input_sigmoid = sig(input_data)
    print("input_sigmoid=",input_sigmoid)
    bce_loss = bce_loss_function(input_sigmoid, one_hot)
    print("bce_loss=",bce_loss)
    
    # verify by hand:  one_hot = tensor([[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]])
    #input_sigmoid= tensor([[0.7311, 0.8808, 0.9526],[0.9820, 0.9933, 0.9975],[0.8808, 0.9820, 0.9975]])
    import math
    a00 = -1.0 * (1 * math.log(0.7311) + (1-1) * math.log(1-0.7311))
    a01 = -1.0 * (0 * math.log(0.8808) +(1-0) * math.log(1-0.8808))
    a02 = -1.0 * (0 * math.log(0.9526) + (1-0) * math.log(1-0.9526))
    b1 = (a00+a01+a02)/3
    
    a10 = -1.0 * (math.log(1-0.9820))
    a11 = -1.0 * (math.log(1-0.9933))
    a12 = -1.0 * (math.log(0.9975))
    b2 = (a10+a11+a12)/3
    
    a20 = -1.0 * (math.log(1-0.8808))
    a21 = -1.0 * (math.log(0.9820))
    a22 = -1.0 * (math.log(1-0.9975))
    b3 = (a20+a21+a22)/3
    
    out = (b1+b2+b3)/3
    print("out=", out)

    7. BCEWithLogitsLoss

    print("--------------BCEWithLogitsLoss---------BCELoss和sigmoid融合---")
    bce_logistic_loss = nn.BCEWithLogitsLoss()
    loss_bce_logistic = bce_logistic_loss(input_data,one_hot)
    print("loss_bce_logistic=",loss_bce_logistic)
  • Original post: https://www.cnblogs.com/crazybird123/p/14632643.html