  • TensorFlow实战 6: Implementing VGGNet-16 (configuration D) in TensorFlow

# coding: utf-8
from datetime import datetime
import tensorflow as tf
import time
import math

def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    '''Create a convolutional layer and append its parameters to the parameter list.
    input_op: input tensor
    name:     layer name
    kh:       kernel height
    kw:       kernel width
    n_out:    number of kernels, i.e. the number of output channels
    dh:       stride height
    dw:       stride width
    p:        parameter list'''
    n_in = input_op.get_shape()[-1].value  # number of input channels

    with tf.name_scope(name) as scope:
        # Xavier-initialized convolution kernel of shape [kh, kw, n_in, n_out]
        kernel = tf.get_variable(scope + "w",
            shape=[kh, kw, n_in, n_out], dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer_conv2d())

        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation


def fc_op(input_op, name, n_out, p):
    '''Create a fully connected layer and append its parameters to the parameter list.'''
    n_in = input_op.get_shape()[-1].value  # number of input channels of input_op

    with tf.name_scope(name) as scope:
        # Fully connected weights, a 2-D matrix of shape [n_in, n_out]
        kernel = tf.get_variable(scope + "w",
                                 shape=[n_in, n_out], dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        # Initialize the biases to 0.1 to avoid dead neurons
        biases = tf.Variable(tf.constant(0.1, shape=[n_out],
                                         dtype=tf.float32), name="b")
        # relu_layer computes relu(matmul(input_op, kernel) + biases) in a single op
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        # Append the layer's kernel and biases to the parameter list p
        p += [kernel, biases]

        return activation


def mpool_op(input_op, name, kh, kw, dh, dw):
    '''Create a max-pooling layer.
    input:        input_op
    pooling size: kh x kw
    strides:      dh x dw
    padding:      SAME
    padding="SAME"  -> output size is ceil(W/S)        (W: input size, S: stride)
    padding="VALID" -> output size is ceil((W-F+1)/S)  (F: window size)
    '''
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME',
                          name=name)

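# As an illustration of the two padding formulas above (example numbers only; TensorFlow
# uses ceiling division):
#   SAME : a 224x224 input pooled with stride 2 gives ceil(224/2) = 112x112
#   VALID: the same input with a 2x2 window and stride 2 gives ceil((224-2+1)/2) = 112x112,
#          while a 3x3 window would give ceil((224-3+1)/2) = 111x111
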
def inference_op(input_op, keep_prob):

    p = []
    '''Convolutional block 1: two convolutional layers and one max-pooling layer.
    Both convolutions use 3x3 kernels, 64 output channels and 1x1 strides.
    Output size: 112x112x64'''
    conv1_1 = conv_op(input_op, name="conv1_1", kh=3, kw=3, n_out=64, dh=1,
                      dw=1, p=p)
    conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1,
                      dw=1, p=p)
    pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dw=2, dh=2)

    '''Convolutional block 2: two convolutional layers and one max-pooling layer.
    Both convolutions use 3x3 kernels and 128 output channels.
    Output size: 56x56x128'''
    conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1,
                      dw=1, p=p)
    conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1,
                      dw=1, p=p)
    pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)

    '''Convolutional block 3: three convolutional layers and one max-pooling layer.
    All three convolutions use 3x3 kernels and 256 output channels.
    Output size: 28x28x256'''
    conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1,
                      dw=1, p=p)
    conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1,
                      dw=1, p=p)
    conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1,
                      dw=1, p=p)
    pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)

    '''Convolutional block 4: three convolutional layers and one max-pooling layer.
    All three convolutions use 3x3 kernels and 512 output channels.
    Output size: 14x14x512'''
    conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1,
                      dw=1, p=p)
    conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1,
                      dw=1, p=p)
    conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1,
                      dw=1, p=p)
    pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)

    '''Convolutional block 5: three convolutional layers and one max-pooling layer.
    All three convolutions use 3x3 kernels and again 512 output channels.
    Output size: 7x7x512'''
    conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1,
                      dw=1, p=p)
    conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1,
                      dw=1, p=p)
    conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1,
                      dw=1, p=p)
    pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dh=2, dw=2)

    '''Flatten the output: tf.reshape turns each sample into a one-dimensional
    vector of length 7x7x512 = 25088'''
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

    '''Fully connected layer with 4096 hidden units and ReLU activation, followed by
    a Dropout layer: keep probability 0.5 during training, 1.0 at inference time'''
    fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")

    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")

    '''Fully connected output layer with 1000 units; softmax turns the logits into
    class probabilities'''
    fc8 = fc_op(fc7_drop, name="fc8", n_out=1000, p=p)
    softmax = tf.nn.softmax(fc8)
    # tf.argmax picks the class with the highest probability
    predictions = tf.argmax(softmax, 1)
    return predictions, softmax, fc8, p


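# A minimal sketch for sanity-checking the per-block output sizes quoted in the comments
# above. The helper name print_shapes_and_params is arbitrary and the function is not part
# of the benchmark below: it builds the graph with the standard 224x224 input, prints the
# shape of every pooling output and sums the parameters collected in p (roughly 138 million
# for VGG-16 at this input size).
def print_shapes_and_params():
    with tf.Graph().as_default():
        images = tf.placeholder(tf.float32, [1, 224, 224, 3])
        keep_prob = tf.placeholder(tf.float32)
        _, _, _, p = inference_op(images, keep_prob)
        graph = tf.get_default_graph()
        # The max-pool ops are created at the top level of the graph, so their output
        # tensors can be looked up directly by name.
        for name in ['pool1', 'pool2', 'pool3', 'pool4', 'pool5']:
            t = graph.get_tensor_by_name(name + ':0')
            print(name, t.get_shape().as_list())
        # Multiply out the shape of every kernel/bias variable and sum the sizes.
        n_params = 0
        for v in p:
            n = 1
            for dim in v.get_shape().as_list():
                n *= dim
            n_params += n
        print('total parameters: %d' % n_params)

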
def time_tensorflow_run(session, target, feed, info_string):
    '''Run target num_batches times and report the mean and standard deviation of the
    per-batch duration; the first n_steps_burn_in iterations are discarded as warm-up.'''
    n_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + n_steps_burn_in):
        start_time = time.time()
        _ = session.run(target, feed_dict=feed)
        duration = time.time() - start_time

        if i >= n_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - n_steps_burn_in, duration))
            # Accumulate every post-burn-in step, not only the printed ones
            total_duration += duration
            total_duration_squared += duration * duration

    mn = total_duration / num_batches
    # Var[x] = E[x^2] - (E[x])^2
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)

    print('%s: %s across %d steps, %.3f +/- %3.3f sec/batch' %
          (datetime.now(), info_string, num_batches, mn, sd))

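# A numeric illustration of the statistics above (numbers invented for the example):
# with two recorded durations 0.18 and 0.20, the mean is 0.19, the mean of the squares is
# (0.0324 + 0.0400) / 2 = 0.0362, so the variance is 0.0362 - 0.19**2 = 0.0001 and the
# standard deviation is sqrt(0.0001) = 0.01.
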
def run_benchmark():

    with tf.Graph().as_default():
        '''The reference image size for VGGNet-16 is 224; here tf.random_normal generates
        random images with a standard deviation of 0.1 instead of loading real data.'''
        image_size = 64  # reduced from 224 to 64 because of limited GPU memory
        images = tf.Variable(tf.random_normal([batch_size,
                                               image_size,
                                               image_size, 3],
                                               dtype=tf.float32,
                                               stddev=1e-1))
        # Placeholder for the dropout keep probability
        keep_prob = tf.placeholder(tf.float32)
        # Build the VGGNet-16 graph with inference_op and unpack its outputs
        predictions, softmax, fc8, p = inference_op(images, keep_prob)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        # keep_prob = 1.0: time the forward pass with time_tensorflow_run
        time_tensorflow_run(sess, predictions, {keep_prob: 1.0}, "Forward")
        # L2 loss on the fc8 output
        objective = tf.nn.l2_loss(fc8)
        # Gradients of the loss with respect to all model parameters
        grad = tf.gradients(objective, p)
        # Time the forward + backward pass
        time_tensorflow_run(sess, grad, {keep_prob: 0.5}, "Forward-backward")


batch_size = 32
num_batches = 100
run_benchmark()

2017-12-21 20:27:00.788000: step 0, duration = 0.055
2017-12-21 20:27:01.343000: step 10, duration = 0.055
2017-12-21 20:27:01.897000: step 20, duration = 0.055
2017-12-21 20:27:02.451000: step 30, duration = 0.055
2017-12-21 20:27:03.008000: step 40, duration = 0.055
2017-12-21 20:27:03.566000: step 50, duration = 0.056
2017-12-21 20:27:04.123000: step 60, duration = 0.056
2017-12-21 20:27:04.679000: step 70, duration = 0.056
2017-12-21 20:27:05.236000: step 80, duration = 0.056
2017-12-21 20:27:05.793000: step 90, duration = 0.056
2017-12-21 20:27:06.295000: Forward across 100 steps, 0.006 +/- 0.017 sec/batch

2017-12-21 20:27:09.294000: step 0, duration = 0.188
2017-12-21 20:27:11.173000: step 10, duration = 0.187
2017-12-21 20:27:13.053000: step 20, duration = 0.188
2017-12-21 20:27:14.934000: step 30, duration = 0.188
2017-12-21 20:27:16.814000: step 40, duration = 0.188
2017-12-21 20:27:18.695000: step 50, duration = 0.188
2017-12-21 20:27:20.575000: step 60, duration = 0.188
2017-12-21 20:27:22.456000: step 70, duration = 0.188
2017-12-21 20:27:24.337000: step 80, duration = 0.188
2017-12-21 20:27:26.217000: step 90, duration = 0.187
2017-12-21 20:27:27.911000: Forward-backward across 100 steps, 0.019 +/- 0.056 sec/batch
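
The script above targets TensorFlow 1.x (tf.contrib, placeholders and sessions). As a rough sketch, assuming a TensorFlow 2.x installation, it should run almost unchanged through the compat.v1 API: adjust the import, swap the two tf.contrib initializers for the equivalent Glorot uniform initializer, and leave the rest of the code as written.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# tf.contrib no longer exists in TF 2.x; in conv_op and fc_op replace
# tf.contrib.layers.xavier_initializer_conv2d() and tf.contrib.layers.xavier_initializer()
# with the equivalent Glorot uniform initializer:
initializer = tf.glorot_uniform_initializer()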
  • Original post: https://www.cnblogs.com/millerfu/p/8094876.html