  • PyTorch common functions

    Assigning values by index

    import torch

    a = torch.zeros([5, 5])
    index = (torch.LongTensor([0, 1]), torch.LongTensor([1, 2]))
    a.index_put_(index, torch.Tensor([1, 1]))   # write 1 at (0,1) and (1,2)

    a[index] = torch.Tensor([4, 4])             # overwrite the same positions with 4
    print(a)
    

    tensor([[0., 4., 0., 0., 0.],
            [0., 0., 4., 0., 0.],
            [0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0.]])
    Reference: three ways to assign tensor values by index in PyTorch [https://blog.csdn.net/qq_41368074/article/details/106986753]
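
    For reference, index assignment can also be written in a few other ways. The short sketch below is my own illustration (not necessarily the same three methods as the linked post) using direct fancy indexing, index_fill_, and scatter_:

    import torch

    a = torch.zeros(5, 5)
    a[torch.tensor([0, 1]), torch.tensor([1, 2])] = 4.0          # direct fancy indexing
    a.index_fill_(0, torch.tensor([3]), -1.0)                    # fill all of row 3 with -1
    a.scatter_(1, torch.tensor([[4], [4], [4], [4], [4]]), 9.0)  # write 9 into column 4 of every row
    print(a)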

    Note: in the first index_put_ example above, index is a tuple of two tensors, one holding the row coordinates and one holding the column coordinates. The next example writes to the positions (0,3), (1,3), and (2,2).

    import torch

    a = torch.zeros([5, 5])
    # Row indices [0, 1, 2] paired with column indices [3, 3, 2] -> (0,3), (1,3), (2,2)
    index = (torch.LongTensor([0, 1, 2]), torch.LongTensor([3, 3, 2]))
    a.index_put_(index, torch.Tensor([1, -100, -3]))
    print(a)
    

    tensor([[   0.,    0.,    0.,    1.,    0.],
            [   0.,    0.,    0., -100.,    0.],
            [   0.,    0.,   -3.,    0.,    0.],
            [   0.,    0.,    0.,    0.,    0.],
            [   0.,    0.,    0.,    0.,    0.]])
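
    One detail worth noting (my addition, not from the original post): index_put_ also accepts accumulate=True, which adds the given values to whatever is already stored instead of overwriting it.

    import torch

    a = torch.zeros(5, 5)
    index = (torch.LongTensor([0, 1, 2]), torch.LongTensor([3, 3, 2]))
    a.index_put_(index, torch.Tensor([1, -100, -3]))
    # With accumulate=True the values are added to the existing entries.
    a.index_put_(index, torch.Tensor([1, -100, -3]), accumulate=True)
    print(a[0, 3], a[1, 3], a[2, 2])   # tensor(2.) tensor(-200.) tensor(-6.)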

    Printing network parameters and output sizes

    You need to install torchsummary first (pip install torchsummary).

    from torchsummary import summary
    summary(model, input_size=(3, 324, 324), device='cpu')
    
    import torch

    if __name__ == "__main__":
        # ResNet here is the implementation from kazuto1011/deeplab-pytorch (see link below)
        model = ResNet(n_classes=1000, n_blocks=[3, 4, 23, 3])
        model.eval()
        image = torch.randn(1, 3, 224, 224)

        print(model)
        print("input:", image.shape)
        print("output:", model(image).shape)

        from torchsummary import summary
        summary(model, input_size=(3, 324, 324), device='cpu')
    

    The summary output is:

    ---------------------------------------------------------------
            Layer (type)               Output Shape         Param #
    ================================================================
                Conv2d-1         [-1, 64, 162, 162]           9,408
           BatchNorm2d-2         [-1, 64, 162, 162]             128
                  ReLU-3         [-1, 64, 162, 162]               0
             MaxPool2d-4           [-1, 64, 82, 82]               0
                Conv2d-5           [-1, 64, 82, 82]           4,096
           BatchNorm2d-6           [-1, 64, 82, 82]             128
                  ReLU-7           [-1, 64, 82, 82]               0
                Conv2d-8           [-1, 64, 82, 82]          36,864
           BatchNorm2d-9           [-1, 64, 82, 82]             128
                 ReLU-10           [-1, 64, 82, 82]               0
               Conv2d-11          [-1, 256, 82, 82]          16,384
          BatchNorm2d-12          [-1, 256, 82, 82]             512
               Conv2d-13          [-1, 256, 82, 82]          16,384
          BatchNorm2d-14          [-1, 256, 82, 82]             512
          _Bottleneck-15          [-1, 256, 82, 82]               0
               Conv2d-16           [-1, 64, 82, 82]          16,384
          BatchNorm2d-17           [-1, 64, 82, 82]             128
                 ReLU-18           [-1, 64, 82, 82]               0
               Conv2d-19           [-1, 64, 82, 82]          36,864
                     ...... (intermediate layers omitted) ......
                ReLU-313          [-1, 512, 11, 11]               0
              Conv2d-314         [-1, 2048, 11, 11]       1,048,576
         BatchNorm2d-315         [-1, 2048, 11, 11]           4,096
              Conv2d-316         [-1, 2048, 11, 11]       2,097,152
         BatchNorm2d-317         [-1, 2048, 11, 11]           4,096
         _Bottleneck-318         [-1, 2048, 11, 11]               0
              Conv2d-319          [-1, 512, 11, 11]       1,048,576
         BatchNorm2d-320          [-1, 512, 11, 11]           1,024
                ReLU-321          [-1, 512, 11, 11]               0
              Conv2d-322          [-1, 512, 11, 11]       2,359,296
         BatchNorm2d-323          [-1, 512, 11, 11]           1,024
                ReLU-324          [-1, 512, 11, 11]               0
              Conv2d-325         [-1, 2048, 11, 11]       1,048,576
         BatchNorm2d-326         [-1, 2048, 11, 11]           4,096
            Identity-327         [-1, 2048, 11, 11]               0
         _Bottleneck-328         [-1, 2048, 11, 11]               0
              Conv2d-329          [-1, 512, 11, 11]       1,048,576
         BatchNorm2d-330          [-1, 512, 11, 11]           1,024
                ReLU-331          [-1, 512, 11, 11]               0
              Conv2d-332          [-1, 512, 11, 11]       2,359,296
         BatchNorm2d-333          [-1, 512, 11, 11]           1,024
                ReLU-334          [-1, 512, 11, 11]               0
              Conv2d-335         [-1, 2048, 11, 11]       1,048,576
         BatchNorm2d-336         [-1, 2048, 11, 11]           4,096
            Identity-337         [-1, 2048, 11, 11]               0
         _Bottleneck-338         [-1, 2048, 11, 11]               0
    AdaptiveAvgPool2d-339           [-1, 2048, 1, 1]               0
             Flatten-340                 [-1, 2048]               0
              Linear-341                 [-1, 1000]       2,049,000
    ================================================================
    Total params: 44,549,160
    Trainable params: 44,549,160
    Non-trainable params: 0
    ----------------------------------------------------------------
    Input size (MB): 1.20
    Forward/backward pass size (MB): 894.98
    Params size (MB): 169.94
    Estimated Total Size (MB): 1066.12
    ----------------------------------------------------------------
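
    The totals above can be sanity-checked without torchsummary. A small sketch, assuming `model` is the ResNet built in the example above:

    n_params = sum(p.numel() for p in model.parameters())
    print(n_params)                         # should match "Total params": 44,549,160
    print(n_params * 4 / 1024 ** 2)         # float32 params size in MB: ~169.94
    print(3 * 324 * 324 * 4 / 1024 ** 2)    # input size in MB for one 3x324x324 image: ~1.20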
    

    The output of print(model) is:

    ResNet(
      (layer1): _Stem(
        (conv1): _ConvBnReLU(
          (conv): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
          (bn): BatchNorm2d(64, eps=1e-05, momentum=0.0010000000000000009, affine=True, track_running_stats=True)
          (relu): ReLU()
        )
        (pool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=True)
      )
      (layer2): _ResLayer(
        (block1): _Bottleneck(
          (reduce): _ConvBnReLU(
            (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn): BatchNorm2d(64, eps=1e-05, momentum=0.0010000000000000009, affine=True, track_running_stats=True)
            (relu): ReLU()
          )
          (conv3x3): _ConvBnReLU(
            (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
            (bn): BatchNorm2d(64, eps=1e-05, momentum=0.0010000000000000009, affine=True, track_running_stats=True)
            (relu): ReLU()
          )
          (increase): _ConvBnReLU(
            (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn): BatchNorm2d(256, eps=1e-05, momentum=0.0010000000000000009, affine=True, track_running_stats=True)
          )
          (shortcut): _ConvBnReLU(
            (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn): BatchNorm2d(256, eps=1e-05, momentum=0.0010000000000000009, affine=True, track_running_stats=True)
          )
        )
        (block2): _Bottleneck(
          (reduce): _ConvBnReLU(
            (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn): BatchNorm2d(64, eps=1e-05, momentum=0.0010000000000000009, affine=True, track_running_stats=True)
            (relu): ReLU()
          )
          (conv3x3): _ConvBnReLU(
            (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
            (bn): BatchNorm2d(64, eps=1e-05, momentum=0.0010000000000000009, affine=True, track_running_stats=True)
            (relu): ReLU()
          )
          (increase): _ConvBnReLU(
            (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn): BatchNorm2d(256, eps=1e-05, momentum=0.0010000000000000009, affine=True, track_running_stats=True)
          )
          (shortcut): Identity()
        )
    

    Loading a pretrained model when the number of classes has changed

        # Model setup
        model = DeepLabV2_ResNet101_MSC(n_classes=CONFIG.DATASET.N_CLASSES)
        state_dict = torch.load(CONFIG.MODEL.INIT_MODEL)  # the pretrained weights

        # Find the layers whose names depend on the number of classes (e.g. the final
        # classifier / ASPP head), rename the corresponding keys in the pretrained
        # state_dict, and then call load_state_dict(new_state_dict, strict=False)
        # so that the mismatched keys are simply skipped.
        import collections
        new_state_dict = collections.OrderedDict()
        for k, v in state_dict.items():
            name = k.replace('base.', '')
            if 'aspp' in name:
                name = name + '_2'
            new_state_dict[name] = v

        print("    Init:", CONFIG.MODEL.INIT_MODEL)
        for m in model.base.state_dict().keys():
            if m not in new_state_dict.keys():
                print("    Skip init:", m)

        model.base.load_state_dict(new_state_dict, strict=False)  # strict=False skips keys that don't match (e.g. the ASPP head)
        model = nn.DataParallel(model)
        model.to(device)

        # Loss definition
        criterion = nn.CrossEntropyLoss(ignore_index=CONFIG.DATASET.IGNORE_LABEL)
        criterion.to(device)
    

    This mainly follows [https://github.com/kazuto1011/deeplab-pytorch].
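
    A minimal self-contained sketch of the same strict=False idea on a toy model (the layers below are made up for illustration and are not from deeplab-pytorch): drop the state_dict entries whose shapes no longer match the new class count, then load the rest.

    import torch
    import torch.nn as nn

    pretrained = nn.Sequential(nn.Linear(8, 16), nn.Linear(16, 10))  # trained with 10 classes
    model = nn.Sequential(nn.Linear(8, 16), nn.Linear(16, 3))        # new task: 3 classes

    state_dict = pretrained.state_dict()
    # Keep only the entries whose shapes still match the new model.
    filtered = {k: v for k, v in state_dict.items()
                if model.state_dict()[k].shape == v.shape}
    # strict=False skips the classifier weights that were filtered out above.
    missing, unexpected = model.load_state_dict(filtered, strict=False)
    print("Skip init:", missing)   # the keys of the new classifier layer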

    conf = labels[best_truth_idx]: labels has size 3 and best_truth_idx has size 4, yet this runs! After testing, it turns out to be indexing by subscript: each element of best_truth_idx selects the corresponding element of labels.

        import torch

        labels = torch.tensor([1, 2, 3])
        best_truth_idx = torch.tensor([1, 0, 0, 2])
        conf = labels[best_truth_idx]
        print(conf)   # prints: tensor([2, 1, 1, 3])
    

    If best_truth_idx = torch.tensor([1, 0, 0, 3]), an out-of-bounds error is raised because the index 3 exceeds the last valid index of labels:

    RuntimeError: index 3 is out of bounds for dim with size 3
    

    So the index values must stay within the size of labels.
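
    If the index might go out of range, it can be clamped first. A small sketch (the clamp is my addition, not something the original code does):

    import torch

    labels = torch.tensor([1, 2, 3])
    best_truth_idx = torch.tensor([1, 0, 0, 3])              # 3 would be out of bounds
    safe_idx = best_truth_idx.clamp(max=labels.numel() - 1)  # clamp into [0, 2]
    print(labels[safe_idx])                                  # tensor([2, 1, 1, 3])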

  • Original post: https://www.cnblogs.com/yanghailin/p/13206418.html