TensorFlow in Action 7: Implementing Google Inception V3 in TensorFlow

#coding=utf-8
import tensorflow as tf
from datetime import datetime
import math
import time

slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)

def inception_v3_arg_scope(weight_decay=0.00004, stddev=0.1,
                           batch_norm_var_collection='moving_vars'):
    '''Generates the default parameters used throughout the network.
    weight_decay: L2 regularization strength, default 0.00004
    stddev: standard deviation of the weight initializer'''
    # Parameter dictionary for batch normalization
    batch_norm_params = {
        'decay': 0.9997,  # decay coefficient for the moving averages
        'epsilon': 0.001,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
        'variables_collections': {
            'beta': None,
            'gamma': None,
            'moving_mean': [batch_norm_var_collection],
            'moving_variance': [batch_norm_var_collection],
        }
    }
    # slim.arg_scope automatically supplies default values to function arguments
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        # Nested slim.arg_scope
        with slim.arg_scope(
                [slim.conv2d],
                weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params) as sc:
            return sc

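# Usage note: the scope returned by inception_v3_arg_scope() is consumed with
# slim.arg_scope, as done at the bottom of this script:
#
#     with slim.arg_scope(inception_v3_arg_scope()):
#         logits, end_points = inception_v3(inputs, is_training=False)
#
# Every slim.conv2d created inside then automatically receives the L2 weight
# regularizer, truncated-normal initializer, ReLU activation and batch
# normalization configured above.
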
def inception_v3_base(inputs, scope=None):
    '''Builds the convolutional part of the Inception V3 network.
    inputs: tensor holding the input image batch
    scope: default parameter environment for the function'''

    end_points = {}  # stores the key nodes of the network

    with tf.variable_scope(scope, 'InceptionV3', [inputs]):

        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='VALID'):
            '''Five convolutional layers and two max-pooling layers;
            input size 299x299x3, output size 35x35x192.
            Arguments: input tensor, 32: number of output channels,
            [3, 3]: kernel size, stride: step size'''
            net = slim.conv2d(inputs, 32, [3, 3], stride=2, scope='Conv2d_1a_3x3')
            net = slim.conv2d(net, 32, [3, 3], scope='Conv2d_2a_3x3')
            net = slim.conv2d(net, 64, [3, 3], padding='SAME', scope='Conv2d_2b_3x3')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_3a_3x3')
            net = slim.conv2d(net, 80, [1, 1], scope='Conv2d_3b_1x1')
            net = slim.conv2d(net, 192, [3, 3], scope='Conv2d_4a_3x3')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_5a_3x3')

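        # Worked size arithmetic for the stack above; with VALID padding the
        # output size is ceil((in - kernel + 1) / stride):
        #   Conv2d_1a_3x3:  ceil((299-3+1)/2) = 149 -> 149x149x32
        #   Conv2d_2a_3x3:  149-3+1 = 147           -> 147x147x32
        #   Conv2d_2b_3x3:  SAME padding, stride 1  -> 147x147x64
        #   MaxPool_3a_3x3: ceil((147-3+1)/2) = 73  -> 73x73x64
        #   Conv2d_3b_1x1:                          -> 73x73x80
        #   Conv2d_4a_3x3:  73-3+1 = 71             -> 71x71x192
        #   MaxPool_5a_3x3: ceil((71-3+1)/2) = 35   -> 35x35x192
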
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
            '''Three consecutive groups of Inception modules'''

            with tf.variable_scope('Mixed_5b'):

                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 64 output channels'''
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    '''1x1 convolution with 48 output channels followed by a 5x5 convolution with 64 output channels'''
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5], scope='Conv2d_0b_5x5')
                with tf.variable_scope('Branch_2'):
                    '''1x1 convolution with 64 output channels followed by two 3x3 convolutions with 96 output channels'''
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 32 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
                '''tf.concat merges the four branches into the module's final output: 35x35x(64+64+96+32)'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_5c'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 64 output channels'''
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    '''1x1 convolution with 48 output channels followed by a 5x5 convolution with 64 output channels'''
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0b_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5], scope='Conv2d_0c_5x5')
                with tf.variable_scope('Branch_2'):
                    '''1x1 convolution with 64 output channels followed by two 3x3 convolutions with 96 output channels'''
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 64 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                '''tf.concat merges the four branches into the module's final output: 35x35x(64+64+96+64)'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_5d'):

                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 64 output channels'''
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    '''1x1 convolution with 48 output channels followed by a 5x5 convolution with 64 output channels'''
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0b_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5], scope='Conv2d_0c_5x5')
                with tf.variable_scope('Branch_2'):
                    '''1x1 convolution with 64 output channels followed by two 3x3 convolutions with 96 output channels'''
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 64 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                '''tf.concat merges the four branches into the module's final output: 35x35x(64+64+96+64)'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

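            # Channel bookkeeping for the 35x35 stage: Mixed_5b outputs
            # 64+64+96+32 = 256 channels, Mixed_5c and Mixed_5d each output
            # 64+64+96+64 = 288 channels; the spatial size stays 35x35.
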
            with tf.variable_scope('Mixed_6a'):

                with tf.variable_scope('Branch_0'):
                    '''3x3 convolution with 384 output channels; with stride 2 the output size is 17x17x384'''
                    branch_0 = slim.conv2d(net, 384, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_1x1')
                with tf.variable_scope('Branch_1'):
                    '''1x1 convolution with 64 output channels followed by two 3x3 convolutions
                    with 96 output channels, the second with stride 2; output size 17x17x96'''
                    branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3],
                                           scope='Conv2d_0b_3x3')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    '''3x3 max pooling with stride 2; pooling leaves the channel count
                    unchanged, so the output size is 17x17x288'''
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
                                               padding='VALID', scope='MaxPool_1a_3x3')

                '''tf.concat merges the three branches into the module's final output: 17x17x(384+96+288) = 17x17x768'''
                net = tf.concat([branch_0, branch_1, branch_2], 3)

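            # Mixed_6a is the 'efficient grid size reduction' of the Inception V3
            # paper: stride-2 convolution branches run in parallel with a stride-2
            # pooling branch, shrinking 35x35 to 17x17 without the representational
            # bottleneck of a lone pooling layer.
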
            with tf.variable_scope('Mixed_6b'):

                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 192 output channels'''
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: 1x1 convolution, 128 output channels
                       Layer 2: 1x7 convolution, 128 output channels
                       Layer 3: 7x1 convolution, 192 output channels'''
                    branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 128, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: 1x1 convolution, 128 output channels
                       Layer 2: 7x1 convolution, 128 output channels
                       Layer 3: 1x7 convolution, 128 output channels
                       Layer 4: 7x1 convolution, 128 output channels
                       Layer 5: 1x7 convolution, 192 output channels'''
                    branch_2 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 128, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 128, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 128, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 192 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the module's final output: 17x17x(192+192+192+192)'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

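            # Why 1x7 then 7x1: factorizing a 7x7 convolution into two asymmetric
            # convolutions covers the same receptive field with far fewer weights.
            # For a 192-channel in/out layer: 7*7*192*192 ~= 1.81M parameters
            # versus (1*7 + 7*1)*192*192 ~= 0.52M, roughly a 3.5x saving.
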
            with tf.variable_scope('Mixed_6c'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 192 output channels'''
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: 1x1 convolution, 160 output channels
                       Layer 2: 1x7 convolution, 160 output channels
                       Layer 3: 7x1 convolution, 192 output channels'''
                    branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: 1x1 convolution, 160 output channels
                       Layer 2: 7x1 convolution, 160 output channels
                       Layer 3: 1x7 convolution, 160 output channels
                       Layer 4: 7x1 convolution, 160 output channels
                       Layer 5: 1x7 convolution, 192 output channels'''
                    branch_2 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 160, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 192 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the module's final output: 17x17x(192+192+192+192)'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_6d'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 192 output channels'''
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: 1x1 convolution, 160 output channels
                       Layer 2: 1x7 convolution, 160 output channels
                       Layer 3: 7x1 convolution, 192 output channels'''
                    branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: 1x1 convolution, 160 output channels
                       Layer 2: 7x1 convolution, 160 output channels
                       Layer 3: 1x7 convolution, 160 output channels
                       Layer 4: 7x1 convolution, 160 output channels
                       Layer 5: 1x7 convolution, 192 output channels'''
                    branch_2 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 160, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 192 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the module's final output: 17x17x(192+192+192+192)'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_6e'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 192 output channels'''
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: 1x1 convolution, 192 output channels
                       Layer 2: 1x7 convolution, 192 output channels
                       Layer 3: 7x1 convolution, 192 output channels'''
                    branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 192, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: 1x1 convolution, 192 output channels
                       Layer 2: 7x1 convolution, 192 output channels
                       Layer 3: 1x7 convolution, 192 output channels
                       Layer 4: 7x1 convolution, 192 output channels
                       Layer 5: 1x7 convolution, 192 output channels'''
                    branch_2 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 192 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the module's final output: 17x17x(192+192+192+192)'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            '''Store Mixed_6e in end_points; it feeds the Auxiliary Classifier'''
            end_points['Mixed_6e'] = net

            with tf.variable_scope('Mixed_7a'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 192 output channels followed by a 3x3
                    convolution with 320 output channels, stride 2; with VALID
                    padding the spatial size shrinks to 8x8'''
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0, 320, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_3x3')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: 1x1 convolution, 192 output channels
                       Layer 2: 1x7 convolution, 192 output channels
                       Layer 3: 7x1 convolution, 192 output channels
                       Layer 4: 3x3 convolution, 192 output channels, stride 2
                       Output tensor size: 8x8x192'''
                    branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 192, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
                    branch_1 = slim.conv2d(branch_1, 192, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    '''3x3 max pooling, stride 2, VALID padding; pooling leaves the
                    channel count unchanged, so the output size is 8x8x768'''
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
                                               padding='VALID', scope='MaxPool_1a_3x3')
                '''tf.concat merges the three branches into the module's final output: 8x8x(320+192+768)'''
                net = tf.concat([branch_0, branch_1, branch_2], 3)

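            # After Mixed_7a the grid is 8x8 with 320+192+768 = 1280 channels.
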
            with tf.variable_scope('Mixed_7b'):

                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 320 output channels'''
                    branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: one 1x1 convolution with 384 output channels
                       Layer 2: a 1x3 convolution and a 3x1 convolution, each with
                       384 output channels, concatenated together
                       Output tensor size: 8x8x(384+384) = 8x8x768'''
                    branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat([
                        slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
                        slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0c_3x1')], 3)

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: one 1x1 convolution with 448 output channels
                       Layer 2: a 3x3 convolution with 384 output channels, which then
                       splits into two parallel branches, a 1x3 convolution and a 3x1
                       convolution (384 output channels each), concatenated at the end
                       Output tensor size: 8x8x(384+384) = 8x8x768'''
                    branch_2 = slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat([
                        slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
                        slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')], 3)

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 192
                    output channels; output tensor size: 8x8x192'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the module's final output: 8x8x(320+768+768+192)'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

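            # Mixed_7b widens the representation to 320+768+768+192 = 2048 channels;
            # the parallel 1x3/3x1 'expanded filter bank' branches are the V3 paper's
            # way of producing high-dimensional features in this coarsest 8x8 stage.
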
            with tf.variable_scope('Mixed_7c'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 320 output channels'''
                    branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: one 1x1 convolution with 384 output channels
                       Layer 2: a 1x3 convolution and a 3x1 convolution, each with
                       384 output channels, concatenated together
                       Output tensor size: 8x8x(384+384) = 8x8x768'''
                    branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat([
                        slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
                        slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0c_3x1')], 3)

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: one 1x1 convolution with 448 output channels
                       Layer 2: a 3x3 convolution with 384 output channels, which then
                       splits into two parallel branches, a 1x3 convolution and a 3x1
                       convolution (384 output channels each), concatenated at the end
                       Output tensor size: 8x8x(384+384) = 8x8x768'''
                    branch_2 = slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat([
                        slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
                        slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')], 3)

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 192
                    output channels; output tensor size: 8x8x192'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the module's final output: 8x8x(320+768+768+192)'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            return net, end_points

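# A quick sanity check (a sketch, not part of the original flow): build the base
# network on a dummy batch and inspect the returned tensors, e.g.
#     net, end_points = inception_v3_base(tf.random_uniform((8, 299, 299, 3)))
#     print(net.get_shape())                     # expect (8, 8, 8, 2048)
#     print(end_points['Mixed_6e'].get_shape())  # expect (8, 17, 17, 768)
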
def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV3'):
    '''Adds global average pooling, Softmax and the Auxiliary Logits on top of the base network.
    num_classes: number of classes for the final classification
    is_training: flag indicating whether this is the training phase
    dropout_keep_prob: fraction of nodes kept by Dropout
    prediction_fn: function used for the final classification
    spatial_squeeze: whether to squeeze away singleton dimensions (e.g. 5x3x1 -> 5x3)
    reuse: whether to reuse the network and its Variables
    scope: environment containing the default parameters'''

    '''tf.variable_scope defines the network name, reuse and related parameters'''
    with tf.variable_scope(scope, 'InceptionV3', [inputs, num_classes], reuse=reuse) as scope:
        '''slim.arg_scope sets the default is_training flag for Batch Normalization and Dropout'''
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
            # Obtain the final layer output net and the end_points dictionary
            net, end_points = inception_v3_base(inputs, scope=scope)

            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                stride=1, padding='SAME'):
                '''Auxiliary Logits: set the default stride of convolution,
                max pooling and average pooling to 1 with SAME padding, then
                fetch Mixed_6e from end_points'''

                aux_logits = end_points['Mixed_6e']

                with tf.variable_scope('AuxLogits'):
                    '''Mixed_6e followed by 5x5 average pooling with stride 3 and
                    VALID padding; output size: 17x17x768 -> 5x5x768
                    (ceil((17-5+1)/3) = 5)'''

                    aux_logits = slim.avg_pool2d(
                        aux_logits, [5, 5], stride=3, padding='VALID',
                        scope='AvgPool_1a_5x5')
                    '''1x1 convolution with 128 output channels'''
                    aux_logits = slim.conv2d(
                        aux_logits, 128, [1, 1], scope='Conv2d_1b_1x1')
                    '''5x5 convolution with 768 output channels;
                       the output size becomes 1x1x768'''
                    aux_logits = slim.conv2d(
                        aux_logits, 768, [5, 5],
                        weights_initializer=trunc_normal(0.01),
                        padding='VALID', scope='Conv2d_2a_5x5')
                    '''1x1 convolution with num_classes output channels;
                       the output size becomes 1x1x1000'''
                    aux_logits = slim.conv2d(
                        aux_logits, num_classes, [1, 1], activation_fn=None,
                        normalizer_fn=None, weights_initializer=trunc_normal(0.01),
                        scope='Conv2d_2b_1x1')

                    if spatial_squeeze:
                        '''tf.squeeze turns 1x1x1000 into 1000, stored in end_points'''
                        aux_logits = tf.squeeze(aux_logits, [1, 2],
                                                name='SpatialSqueeze')
                    end_points['AuxLogits'] = aux_logits

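                # During training the auxiliary loss computed from AuxLogits is
                # added to the main loss with a small weight (0.3 in the original
                # GoogLeNet setup); the V3 paper found it acts mostly as a
                # regularizer. At inference time AuxLogits is simply unused.
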
                with tf.variable_scope('Logits'):
                    '''Normal classification logits: apply 8x8 global average
                    pooling with VALID padding to the output of the last
                    convolutional layer (Mixed_7c);
                    output size: 8x8x2048 -> 1x1x2048
                    '''
                    net = slim.avg_pool2d(net, [8, 8], padding='VALID', scope='AvgPool_1a_8x8')
                    '''A Dropout layer with keep probability dropout_keep_prob'''
                    net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')

                    end_points['PreLogits'] = net
                    '''1x1 convolution with num_classes (1000) output channels'''
                    logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                         normalizer_fn=None, scope='Conv2d_1c_1x1')

                    if spatial_squeeze:
                        '''tf.squeeze removes the singleton spatial dimensions'''
                        logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')

                end_points['Logits'] = logits
                '''softmax produces the final class predictions'''
                end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
            '''Return logits and end_points (which includes the auxiliary nodes)'''
            return logits, end_points

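# tf.squeeze with axes [1, 2] drops the two singleton spatial dimensions, e.g.
# a logits tensor of shape [batch, 1, 1, num_classes] becomes
# [batch, num_classes], the shape a softmax cross-entropy loss expects.
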
def time_tensorflow_run(session, target, info_string):
    '''Runs target num_batches times and reports the mean and standard
    deviation of the per-batch duration, skipping a warm-up period.'''
    num_steps_burn_in = 10  # warm-up iterations, excluded from the statistics
    total_duration = 0.0
    total_duration_squared = 0.0
    # num_batches is a module-level global defined below
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time

        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            # Accumulate the statistics for every post-warm-up step,
            # not just the ones that are printed
            total_duration += duration
            total_duration_squared += duration * duration

    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)

    print('%s: %s across %d steps, %.3f +/- %3.3f sec/batch' %
          (datetime.now(), info_string, num_batches, mn, sd))

batch_size = 32
height, width = 299, 299

# Benchmark the forward pass on a batch of random images
inputs = tf.random_uniform((batch_size, height, width, 3))

with slim.arg_scope(inception_v3_arg_scope()):
    logits, end_points = inception_v3(inputs, is_training=False)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
num_batches = 100
time_tensorflow_run(sess, logits, "Forward")
2017-12-23 23:46:17.904100: step 0, duration = 0.467
2017-12-23 23:46:22.557100: step 10, duration = 0.464
2017-12-23 23:46:27.201100: step 20, duration = 0.463
2017-12-23 23:46:31.848100: step 30, duration = 0.466
2017-12-23 23:46:36.495100: step 40, duration = 0.464
2017-12-23 23:46:41.148100: step 50, duration = 0.464
2017-12-23 23:46:45.795100: step 60, duration = 0.464
2017-12-23 23:46:50.458100: step 70, duration = 0.467
2017-12-23 23:46:55.098100: step 80, duration = 0.464
2017-12-23 23:46:59.741100: step 90, duration = 0.463
2017-12-23 23:47:03.926100: Forward across 100 steps, 0.046 +/- 0.139 sec/batch
Original article: https://www.cnblogs.com/millerfu/p/8094891.html