  • TensorFlow in Action 5: Implementing AlexNet in TensorFlow

# coding: utf-8

from datetime import datetime
import math
import time
import tensorflow as tf

batch_size = 32
num_batches = 1000

def print_activations(t):
    '''Print the output shape of a convolution or pooling layer.
    t: a tensor; t.op.name is the tensor's name and
    t.get_shape().as_list() is its shape as a Python list.'''
    print(t.op.name, '', t.get_shape().as_list())
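# For conv1, for example, this prints a line like "conv1  [32, 56, 56, 64]"
# (cf. the sample output below).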

def inference(images):
    '''Build the convolutional part of AlexNet.
    input: images; returns: the last layer, pool5, plus parameters.'''
    parameters = []

    with tf.name_scope('conv1') as scope:
        # First conv layer: 11x11 kernels, 3 color channels in, 64 kernels.
        kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64],
                            dtype=tf.float32, stddev=1e-1), name='weights')
        # Convolve the input images with a stride of 4x4.
        conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
        # Biases are all initialized to zero.
        biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                             trainable=True, name='biases')
        # Add the biases to the convolution result conv.
        bias = tf.nn.bias_add(conv, biases)
        # ReLU applies the nonlinearity to the result.
        conv1 = tf.nn.relu(bias, name=scope)
        print_activations(conv1)
        # Add this layer's kernel and biases to parameters.
        parameters += [kernel, biases]
    # LRN layer; depth_radius=4 and the other values are those recommended
    # in the AlexNet paper.
    lrn1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001/9, beta=0.75, name='lrn1')
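    # tf.nn.lrn normalizes each activation by the squared activations in a
    # window of 2*depth_radius + 1 = 9 adjacent channels:
    # output = input / (bias + alpha * sum(input_j ** 2)) ** beta.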
    pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='VALID', name='pool1')

    print_activations(pool1)

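    # At this point the feature map is 27x27x64: conv1 gives ceil(224/4) = 56
    # with SAME padding, and the 3x3, stride-2 VALID pool gives
    # (56 - 3)//2 + 1 = 27, matching the printed shapes.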
    with tf.name_scope('conv2') as scope:
        # Second conv layer: 5x5 kernels, 64 input channels, 192 kernels.
        kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192],
                            dtype=tf.float32, stddev=1e-1), name='weights')
        # Convolve pool1 with a stride of 1x1.
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        # Biases are all initialized to zero.
        biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),
                             trainable=True, name='biases')
        # Add the biases to the convolution result conv.
        bias = tf.nn.bias_add(conv, biases)
        # ReLU applies the nonlinearity to the result.
        conv2 = tf.nn.relu(bias, name=scope)

        # Add this layer's kernel and biases to parameters.
        parameters += [kernel, biases]

    print_activations(conv2)
    # LRN layer; same hyperparameters as lrn1, per the AlexNet paper.
    lrn2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001/9, beta=0.75, name='lrn2')
    pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='VALID', name='pool2')

    print_activations(pool2)

    with tf.name_scope('conv3') as scope:
        # Third conv layer: 3x3 kernels, 192 input channels, 384 kernels.
        kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
                            dtype=tf.float32, stddev=1e-1), name='weights')
        # Convolve pool2 with a stride of 1x1.
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        # Biases are all initialized to zero.
        biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
                             trainable=True, name='biases')
        # Add the biases to the convolution result conv.
        bias = tf.nn.bias_add(conv, biases)
        # ReLU applies the nonlinearity to the result.
        conv3 = tf.nn.relu(bias, name=scope)

        # Add this layer's kernel and biases to parameters.
        parameters += [kernel, biases]

        print_activations(conv3)

    with tf.name_scope('conv4') as scope:
        # Fourth conv layer: 3x3 kernels, 384 input channels, 256 kernels.
        kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256],
                            dtype=tf.float32, stddev=1e-1), name='weights')
        # Convolve conv3 with a stride of 1x1.
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
        # Biases are all initialized to zero.
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                             trainable=True, name='biases')
        # Add the biases to the convolution result conv.
        bias = tf.nn.bias_add(conv, biases)
        # ReLU applies the nonlinearity to the result.
        conv4 = tf.nn.relu(bias, name=scope)

        # Add this layer's kernel and biases to parameters.
        parameters += [kernel, biases]

        print_activations(conv4)

    with tf.name_scope('conv5') as scope:
        # Fifth conv layer: 3x3 kernels, 256 input channels, 256 kernels.
        kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
                            dtype=tf.float32, stddev=1e-1), name='weights')
        # Convolve conv4 with a stride of 1x1.
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        # Biases are all initialized to zero.
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                             trainable=True, name='biases')
        # Add the biases to the convolution result conv.
        bias = tf.nn.bias_add(conv, biases)
        # ReLU applies the nonlinearity to the result.
        conv5 = tf.nn.relu(bias, name=scope)

        # Add this layer's kernel and biases to parameters.
        parameters += [kernel, biases]

        print_activations(conv5)

    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='VALID', name='pool5')
    print_activations(pool5)
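    # pool5 is 6x6x256 ((13 - 3)//2 + 1 = 6), i.e. 9216 features per image.
    # AlexNet's fully connected layers would attach here; this script
    # benchmarks only the convolutional part (see the sketch after the output).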
    return pool5, parameters

def time_tensorflow_run(session, target, info_string):
    '''Time each iteration of the AlexNet computation.
    target: the op to benchmark
    info_string: a label for the report'''
    num_steps_burn_in = 10        # warm-up iterations, excluded from the stats
    total_duration = 0.0          # total time over the timed iterations
    total_duration_squared = 0.0  # sum of squared durations, for the variance

    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        # After the num_steps_burn_in warm-up iterations, show the current
        # iteration's duration every 10 steps.
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
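    # One-pass statistics over the num_batches timed iterations, using the
    # identity Var(d) = E[d^2] - (E[d])^2.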
    mn = total_duration / num_batches  # mean time per iteration
    vr = total_duration_squared / num_batches - mn * mn
    # standard deviation of the per-iteration time
    sd = math.sqrt(vr)

    print('%s: %s across %d steps, %.3f +/-%.3f sec/batch' %
          (datetime.now(), info_string, num_batches, mn, sd))

def run_benchmark():
    with tf.Graph().as_default():
        image_size = 224
        # batch_size: samples per iteration; image_size: image height and
        # width; 3: number of color channels. Random images stand in for
        # real input data.
        images = tf.Variable(tf.random_normal([batch_size,
                                               image_size,
                                               image_size, 3],
                                               dtype=tf.float32,
                                               stddev=1e-1))
        pool5, parameters = inference(images)

        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        time_tensorflow_run(sess, pool5, "Forward")

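        # Backward pass: use the L2 norm of pool5 as a dummy scalar objective
        # so tf.gradients can build the backprop graph w.r.t. all parameters.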
        objective = tf.nn.l2_loss(pool5)
        grad = tf.gradients(objective, parameters)
        time_tensorflow_run(sess, grad, "Forward-backward")

run_benchmark()
Sample output:

conv1  [32, 56, 56, 64]
pool1  [32, 27, 27, 64]
conv2  [32, 27, 27, 192]
pool2  [32, 13, 13, 192]
conv3  [32, 13, 13, 384]
conv4  [32, 13, 13, 256]
conv5  [32, 13, 13, 256]
pool5  [32, 6, 6, 256]
2017-12-20 23:31:19.926000: step 990, duration = 0.197
2017-12-20 23:31:21.690000: Forward-backward across 1000 steps, 0.198 +/-0.004 sec/batch
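The benchmark stops at pool5. In full AlexNet the 6x6x256 = 9216 activations
feed three fully connected layers (4096, 4096, and 1000 units in the original
paper). Below is a minimal sketch of how such a head could be attached in the
same TF1-style API; the classifier_head helper and its initialization choices
are illustrative assumptions, not part of the benchmarked code:

def classifier_head(pool5):
    '''Illustrative sketch only: AlexNet's fully connected layers on pool5.'''
    flat = tf.reshape(pool5, [-1, 6 * 6 * 256])  # [batch, 9216]

    def fc(x, n_in, n_out, name, relu=True):
        # Same weight/bias initialization style as the conv layers above.
        w = tf.Variable(tf.truncated_normal([n_in, n_out], dtype=tf.float32,
                                            stddev=1e-1), name=name + '_weights')
        b = tf.Variable(tf.constant(0.0, shape=[n_out], dtype=tf.float32),
                        trainable=True, name=name + '_biases')
        y = tf.matmul(x, w) + b
        return tf.nn.relu(y) if relu else y

    fc6 = fc(flat, 6 * 6 * 256, 4096, 'fc6')
    fc7 = fc(fc6, 4096, 4096, 'fc7')
    return fc(fc7, 4096, 1000, 'fc8', relu=False)  # linear logits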
  • Original article: https://www.cnblogs.com/millerfu/p/8094854.html