  • DARTS Code Analysis (PyTorch)

    I have recently been reading the DARTS code. It includes a file, operations.py, which defines the various operations applied on the edges between nodes.

    import torch
    import torch.nn as nn

    OPS = {
        'none': lambda C, stride, affine: Zero(stride),
        'avg_pool_3x3': lambda C, stride, affine: PoolBN('avg', C, 3, stride, 1, affine=affine),
        'max_pool_3x3': lambda C, stride, affine: PoolBN('max', C, 3, stride, 1, affine=affine),
        'skip_connect': lambda C, stride, affine: 
            Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
        'sep_conv_3x3': lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
        'sep_conv_5x5': lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
        'sep_conv_7x7': lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
        'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine), # 5x5
        'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine), # 9x9
        'conv_7x1_1x7': lambda C, stride, affine: FacConv(C, C, 7, stride, 3, affine=affine)
    }
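
    With the imports and class definitions above in scope, each entry is a factory taking (C, stride, affine) and returning an nn.Module. A minimal usage sketch (the channel and input sizes here are arbitrary):

    op = OPS['sep_conv_3x3'](16, 1, True)  # 3x3 separable conv, 16 channels, stride 1
    out = op(torch.randn(2, 16, 32, 32))   # spatial size preserved: torch.Size([2, 16, 32, 32])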

    Ten operations are defined in total; each is explained in turn below:

    • class PoolBN(nn.Module):
          """
          AvgPool or MaxPool - BN
          """
          def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True):
              """
              Args:
                  pool_type: 'max' or 'avg'
              """
              super().__init__()
              if pool_type.lower() == 'max':
                  self.pool = nn.MaxPool2d(kernel_size, stride, padding)
              elif pool_type.lower() == 'avg':
                  self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False)
              else:
                  raise ValueError()
      
              self.bn = nn.BatchNorm2d(C, affine=affine)
      
          def forward(self, x):
              out = self.pool(x)
              out = self.bn(out)
              return out

      This is pooling followed by batch normalization, in max and average variants. count_include_pad=False excludes the zero padding from the average computation.
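
      A small standalone check of that flag (a sketch, not from the repo):

          import torch
          import torch.nn as nn

          x = torch.ones(1, 1, 2, 2)
          with_pad = nn.AvgPool2d(3, stride=1, padding=1)(x)
          without_pad = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)(x)
          print(with_pad[0, 0, 0, 0])     # 0.444...: the corner window averages 9 cells, 5 of them padded zeros
          print(without_pad[0, 0, 0, 0])  # 1.0: only the 4 real pixels are averaged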

    • class Identity(nn.Module):
          def __init__(self):
              super().__init__()
      
          def forward(self, x):
              return x

      This implements the skip connection: with stride == 1 the input passes through unchanged, while the skip_connect entry in OPS dispatches to FactorizedReduce when stride == 2.

    • class FactorizedReduce(nn.Module):
          """
          Reduce feature map size by factorized pointwise(stride=2).
          """
          def __init__(self, C_in, C_out, affine=True):
              super().__init__()
              self.relu = nn.ReLU()
              self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
              self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
              self.bn = nn.BatchNorm2d(C_out, affine=affine)
      
          def forward(self, x):
              x = self.relu(x)
              # conv1 samples even-indexed pixels; conv2 sees the input shifted by one
              # pixel, so the two stride-2 convs cover complementary positions
              out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)
              out = self.bn(out)
              return out

      This halves the spatial size of the feature map while preserving the channel count: each stride-2 1x1 convolution produces C_out // 2 channels (so C_out must be even), and the two outputs are concatenated along the channel dimension. A shape check follows below.
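
      A quick shape check (standalone sketch with arbitrary sizes, assuming the class above is in scope):

          import torch

          fr = FactorizedReduce(16, 16)
          x = torch.randn(2, 16, 32, 32)
          print(fr(x).shape)  # torch.Size([2, 16, 16, 16])

      Feeding conv2 the input shifted by one pixel lets the two stride-2 convolutions sample complementary positions, losing less information than a single strided 1x1 convolution would.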

    • class DilConv(nn.Module):
          """ (Dilated) depthwise separable conv
          ReLU - (Dilated) depthwise separable - Pointwise - BN
      
          If dilation == 2, 3x3 conv => 5x5 receptive field
                            5x5 conv => 9x9 receptive field
          """
          def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
              super().__init__()
              self.net = nn.Sequential(
                  nn.ReLU(),
                  # depthwise: groups=C_in gives each input channel its own filter
                  nn.Conv2d(C_in, C_in, kernel_size, stride, padding, dilation=dilation, groups=C_in,
                            bias=False),
                  # pointwise: 1x1 conv mixes channels and sets the output width
                  nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False),
                  nn.BatchNorm2d(C_out, affine=affine)
              )
      
          def forward(self, x):
              return self.net(x)

      A (dilated) depthwise separable convolution. groups=C_in splits the input feature map into C_in groups, one per input channel, so each channel is filtered independently (the depthwise step); the following 1x1 pointwise convolution then mixes channels to produce the C_out outputs. This extracts per-channel features while sharply reducing parameters and computation, as the sketch below shows.
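
      A quick parameter-count comparison for kernel_size=3 and C_in = C_out = 16 (a standalone sketch with arbitrary sizes):

          import torch.nn as nn

          standard = nn.Conv2d(16, 16, 3, padding=1, bias=False)              # 3*3*16*16 = 2304 weights
          depthwise = nn.Conv2d(16, 16, 3, padding=1, groups=16, bias=False)  # 3*3*16 = 144 weights
          pointwise = nn.Conv2d(16, 16, 1, bias=False)                        # 16*16 = 256 weights

          count = lambda m: sum(p.numel() for p in m.parameters())
          print(count(standard), count(depthwise) + count(pointwise))         # 2304 400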

    • class SepConv(nn.Module):
          """ Depthwise separable conv
          DilConv(dilation=1) * 2
          """
          def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
              super().__init__()
              self.net = nn.Sequential(
                  DilConv(C_in, C_in, kernel_size, stride, padding, dilation=1, affine=affine),
                  # second block always uses stride 1, so only the first reduces H/W
                  DilConv(C_in, C_out, kernel_size, 1, padding, dilation=1, affine=affine)
              )
      
          def forward(self, x):
              return self.net(x)

      A depthwise separable convolution built by stacking two of the grouped DilConv blocks above with dilation=1; only the first block carries the stride, and the channel change to C_out happens in the second. A shape check follows below.
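
      A quick shape check (standalone sketch with arbitrary sizes, assuming the class above is in scope):

          import torch

          sep = SepConv(16, 32, 3, stride=2, padding=1)
          x = torch.randn(2, 16, 32, 32)
          print(sep(x).shape)  # torch.Size([2, 32, 16, 16]): the first block halves H/W, the second maps to 32 channels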

    • class FacConv(nn.Module):
          """ Factorized conv
          ReLU - Conv(Kx1) - Conv(1xK) - BN
          """
          def __init__(self, C_in, C_out, kernel_length, stride, padding, affine=True):
              super().__init__()
              self.net = nn.Sequential(
                  nn.ReLU(),
                  # note: the scalar padding is applied to both axes of each
                  # asymmetric kernel, so the output is larger than the input
                  nn.Conv2d(C_in, C_in, (kernel_length, 1), stride, padding, bias=False),
                  nn.Conv2d(C_in, C_out, (1, kernel_length), stride, padding, bias=False),
                  nn.BatchNorm2d(C_out, affine=affine)
              )
      
          def forward(self, x):
              return self.net(x)

      This is a factorized convolution with rectangular kernels (Kx1 followed by 1xK). Because the same scalar padding is applied to both axes of each asymmetric kernel, the output feature map comes out slightly larger than the input: with stride=1, each spatial dimension grows by kernel_length - 1, as shown below.
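
      The growth can be verified directly; the tuple paddings in the second half are my assumption about how the size could be kept fixed, not something this repo does:

          import torch
          import torch.nn as nn

          fac = FacConv(16, 16, 7, stride=1, padding=3)  # assumes the class above is in scope
          x = torch.randn(2, 16, 32, 32)
          print(fac(x).shape)  # torch.Size([2, 16, 38, 38]): each axis grows by kernel_length - 1

          # hypothetical fix: pad only the axis each asymmetric kernel spans
          kx1 = nn.Conv2d(16, 16, (7, 1), 1, (3, 0), bias=False)
          x1k = nn.Conv2d(16, 16, (1, 7), 1, (0, 3), bias=False)
          print(x1k(kx1(x)).shape)  # torch.Size([2, 16, 32, 32])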

    • class Zero(nn.Module):
          def __init__(self, stride):
              super().__init__()
              self.stride = stride
      
          def forward(self, x):
              if self.stride == 1:
                  return x * 0.
      
              # re-sizing by stride
              return x[:, :, ::self.stride, ::self.stride] * 0.

      This sets the output feature map to all zeros, while the spatial size is still reduced according to stride (via strided slicing), so the output shape stays consistent with the other operations.
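
      A quick check (standalone sketch with arbitrary sizes, assuming the class above is in scope):

          import torch

          z = Zero(stride=2)
          x = torch.randn(2, 16, 32, 32)
          out = z(x)
          print(out.shape, out.abs().sum())  # torch.Size([2, 16, 16, 16]) tensor(0.)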

  • Original post: https://www.cnblogs.com/yqpy/p/11453074.html