  • VGG Series (PyTorch Implementation)

    GitHub blog link
    CSDN blog link

    Paper: VERY DEEP CONVOLUTIONAL NETWORKS FOR LARGE-SCALE IMAGE RECOGNITION

    Download: https://arxiv.org/pdf/1409.1556.pdf

    Network architecture diagrams:

    [Figure: VGG network architectures]
    [Figure: VGG parameter counts]

    PyTorch implementation:

    import torch.nn as nn
    import math
    
    
    class VGG(nn.Module):
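        # Generic VGG model: `features` is the convolutional stack built by
        # make_layers(); the classifier is the paper's three fully connected
        # layers (4096 -> 4096 -> num_classes) applied to the 7x7x512 feature map.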
    
        def __init__(self, features, num_classes=1000, init_weights=True):
            super(VGG, self).__init__()
            self.features = features
            self.classifier = nn.Sequential(
                nn.Linear(512 * 7 * 7, 4096),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(4096, num_classes),
            )
            if init_weights:
                self._initialize_weights()
    
        def forward(self, x):
            x = self.features(x)
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            return x
    
        def _initialize_weights(self):
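            # He-style normal init for conv weights (std = sqrt(2 / fan_out)),
            # constants (1, 0) for BatchNorm, and N(0, 0.01) for linear layers.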
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                    if m.bias is not None:
                        m.bias.data.zero_()
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    m.weight.data.normal_(0, 0.01)
                    m.bias.data.zero_()
    
    
    def make_layers(cfg, batch_norm=False):
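        # Build the convolutional part from a cfg list: each integer adds a
        # 3x3 conv (padding 1, optionally with BatchNorm) followed by ReLU,
        # and 'M' adds a 2x2 max-pool.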
        layers = []
        in_channels = 3
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        return nn.Sequential(*layers)
    
    
    cfg = {
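        # Configurations from Table 1 of the paper:
        # 'A' -> VGG11, 'B' -> VGG13, 'D' -> VGG16, 'E' -> VGG19; 'M' marks a max-pool.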
        'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
        'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
        'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
        'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    }
    
    
    def vgg11(**kwargs):
        model = VGG(make_layers(cfg['A']), **kwargs)
        return model
    
    
    def vgg11_bn(**kwargs):
        model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
        return model
    
    
    def vgg13(**kwargs):
        model = VGG(make_layers(cfg['B']), **kwargs)
        return model
    
    
    def vgg13_bn(**kwargs):
        model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
        return model
    
    
    def vgg16(**kwargs):
        model = VGG(make_layers(cfg['D']), **kwargs)
        return model
    
    
    def vgg16_bn(**kwargs):
        model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
        return model
    
    
    def vgg19(**kwargs):
        model = VGG(make_layers(cfg['E']), **kwargs)
        return model
    
    
    def vgg19_bn(**kwargs):
        model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
        return model
    
    
    if __name__ == '__main__':
        # 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19_bn', 'vgg19'
        # Example
        net11 = vgg11()
        print(net11)
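        # Illustrative sanity check (not in the original post); assumes the
        # standard 224x224 RGB input from the paper.
        import torch
        x = torch.randn(1, 3, 224, 224)
        print(net11(x).shape)  # expected: torch.Size([1, 1000])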
    
    
  • Original post: https://www.cnblogs.com/Mrzhang3389/p/10127071.html