  • TensorFlow Framework Study Notes

    1. Basic TensorFlow Usage

      Creating and launching a graph

    import tensorflow as tf

    # Create a constant op (a 1x2 matrix)
    m1 = tf.constant([[3,3]])
    # Create a constant op (a 2x1 matrix)
    m2 = tf.constant([[2],[3]])
    # Create a matmul op, passing in m1 and m2
    product = tf.matmul(m1,m2)
    print(product)
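      Printing product at this point does not execute anything; it only shows the symbolic tensor handle, which looks roughly like Tensor("MatMul:0", shape=(1, 1), dtype=int32).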

      

    # Define a session; this launches the default graph
    sess = tf.Session()
    # Call sess.run to execute the matmul op
    # run(product) triggers all 3 ops in the graph
    result = sess.run(product)
    print(result)
    sess.close()

      A session must be created before the product of the two constants can actually be computed.

    # The session code above can be rewritten as follows; the session is
    # then closed automatically, with no manual sess.close() needed
    with tf.Session() as sess:
        # Call sess.run to execute the matmul op
        # run(product) triggers all 3 ops in the graph
        result = sess.run(product)
        print(result)

      Variables

    import tensorflow as tf

    # Define a variable
    x = tf.Variable([1,2])
    a = tf.constant([3,3])
    # Define subtraction and addition ops
    sub = tf.subtract(x,a)
    add = tf.add(x,sub)

    # Variables must be initialized before use
    init = tf.global_variables_initializer()

    # Define a session
    with tf.Session() as sess:
        # Run the variable initializer first
        sess.run(init)

        print(sess.run(sub))
        print(sess.run(add))
    # A variable initialized to 0, named 'counter'
    state = tf.Variable(0,name='counter')

    new_value = tf.add(state,1)

    # Use the assign op; plain = assignment does not work on graph variables
    update = tf.assign(state,new_value)

    # state is a variable, so it needs initialization
    init = tf.global_variables_initializer()

    # Define a session
    with tf.Session() as sess:
        # Run the variable initializer first
        sess.run(init)
        # Print the initial value
        print(sess.run(state))

        for _ in range(5):
            sess.run(update)
            print(sess.run(state))
      Fetch and Feed

    import tensorflow as tf

    # Fetch: run several ops in a single call
    input1 = tf.constant(3.0)
    input2 = tf.constant(2.0)
    input3 = tf.constant(5.0)

    add = tf.add(input2,input3)
    mul = tf.multiply(input1,add)

    with tf.Session() as sess:
        # To fetch several ops at once, just pass them as a list
        result = sess.run([add,mul])
        print(result)

    # Feed
    # Create placeholders
    input1 = tf.placeholder(tf.float32)
    input2 = tf.placeholder(tf.float32)

    # The values of input1 and input2 can be supplied when output is run,
    # passed in as a dictionary
    output = tf.multiply(input1,input2)
    with tf.Session() as sess:
        # Values are fed in as a dictionary
        print(sess.run(output,feed_dict={input1:[8.],input2:[3.]}))

      A simple TensorFlow example

    import tensorflow as tf
    import numpy as np

    # Use numpy to generate 100 random points
    x_data = np.random.rand(100)
    y_data = x_data*0.1 + 0.2

    # Build a linear model
    b = tf.Variable(0.)
    k = tf.Variable(0.)
    y = x_data*k + b

    # Train the model with tensorflow, i.e. learn the values of k and b
    # Define the quadratic cost function
    loss = tf.reduce_mean(tf.square(y_data - y))
    # Gradient descent optimizer with learning rate 0.2
    optimizer = tf.train.GradientDescentOptimizer(0.2)
    # Minimize the cost function
    train = optimizer.minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        # Run the variable initializer first
        sess.run(init)

        for step in range(201):
            sess.run(train)
            if step%20 == 0:
                print(step,sess.run([k,b]))
    Training output:

    0 [0.0517033, 0.099609137]
    20 [0.1019343, 0.1989941]
    40 [0.10121739, 0.19936697]
    60 [0.10076618, 0.19960159]
    80 [0.10048222, 0.19974925]
    100 [0.1003035, 0.19984218]
    120 [0.10019101, 0.19990067]
    140 [0.10012019, 0.19993751]
    160 [0.10007564, 0.19996066]
    180 [0.10004761, 0.19997525]
    200 [0.10002997, 0.19998442]
      Linear regression and classification in TensorFlow; an introduction to softmax
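      Softmax turns a vector of raw scores into a probability distribution: each score is exponentiated and normalized by the sum of the exponentials. A minimal NumPy sketch for illustration only (in TensorFlow this is provided as tf.nn.softmax):

    import numpy as np

    def softmax(z):
        # subtract the max for numerical stability; the result is unchanged
        e = np.exp(z - np.max(z))
        return e / e.sum()

    print(softmax(np.array([2.0, 1.0, 0.1])))  # ~[0.659, 0.242, 0.099]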
      Nonlinear regression
    import tensorflow as tf
    import numpy as np
    import matplotlib.pyplot as plt

    # Use numpy to generate 200 random points
    # 200 points in the range [-0.5,0.5], shaped as 200 rows x 1 column
    x_data = np.linspace(-0.5,0.5,200)[:,np.newaxis]
    noise = np.random.normal(0,0.02,x_data.shape)
    y_data = np.square(x_data) + noise

    # Define two placeholders
    # [None,1] means any number of rows, exactly one column
    x = tf.placeholder(tf.float32,[None,1])
    y = tf.placeholder(tf.float32,[None,1])

    # Define the hidden layer of the network
    # The weights are [1,10]: one input neuron, ten hidden neurons
    Weight_L1 = tf.Variable(tf.random_normal([1,10]))
    biases_L1 = tf.Variable(tf.zeros([1,10]))
    Wx_plus_b_L1 = tf.matmul(x,Weight_L1) + biases_L1
    L1 = tf.nn.tanh(Wx_plus_b_L1)

    # Define the output layer of the network
    Weight_L2 = tf.Variable(tf.random_normal([10,1]))
    biases_L2 = tf.Variable(tf.zeros([1,1]))
    Wx_plus_b_L2 = tf.matmul(L1,Weight_L2) + biases_L2
    prediction = tf.nn.tanh(Wx_plus_b_L2)

    # Quadratic cost function
    loss = tf.reduce_mean(tf.square(y-prediction))

    # Train with gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    with tf.Session() as sess:
        # Initialize variables
        sess.run(tf.global_variables_initializer())
        for _ in range(2000):
            sess.run(train_step,feed_dict={x:x_data,y:y_data})

        # Get the predictions
        prediction_value = sess.run(prediction, feed_dict={x:x_data})

        # Plot the result
        plt.figure()
        plt.scatter(x_data,y_data)
        plt.plot(x_data,prediction_value,'r-',lw=5)
        plt.show()

      A simple MNIST classifier

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Load the dataset
    # one_hot encodes the labels as 0/1 vectors
    mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

    # Size of each batch
    batch_size = 100

    # Compute the total number of batches
    n_batch = mnist.train.num_examples // batch_size

    # Define two placeholders; each image is 28x28=784
    x = tf.placeholder(tf.float32,[None,784])
    y = tf.placeholder(tf.float32,[None,10])

    # Create a simple neural network
    Weight_L1 = tf.Variable(tf.zeros([784,10]))
    biases_L1 = tf.Variable(tf.zeros([10]))
    Wx_plus_b_L1 = tf.matmul(x,Weight_L1) + biases_L1
    prediction = tf.nn.softmax(Wx_plus_b_L1)

    # Quadratic cost function
    loss = tf.reduce_mean(tf.square(y-prediction))

    # Train with gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()

    # Store the results in a list of booleans
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a tensor

    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast booleans to float32: true->1, false->0

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(21):
            for batch in range(n_batch):
                # Get the images for each batch
                batch_xs,batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})

            acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
            print("Iter " + str(epoch) + ",Testing Accuracy "+ str(acc))
    # Chapter 3 exercise: modify the network so that accuracy exceeds 95%
    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Load the dataset
    # one_hot encodes the labels as 0/1 vectors
    mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

    # Size of each batch
    batch_size = 100

    # Compute the total number of batches
    n_batch = mnist.train.num_examples // batch_size

    # Define two placeholders; each image is 28x28=784
    x = tf.placeholder(tf.float32,[None,784])
    y = tf.placeholder(tf.float32,[None,10])

    # Create a simple neural network with a 20-unit hidden layer
    Weight_L1 = tf.Variable(tf.zeros([784,20]))
    biases_L1 = tf.Variable(tf.zeros([20]))
    Wx_plus_b_L1 = tf.matmul(x,Weight_L1) + biases_L1
    L1 = tf.nn.tanh(Wx_plus_b_L1)

    # Define the output layer
    Weight_L2 = tf.Variable(tf.random_normal([20,10]))
    biases_L2 = tf.Variable(tf.zeros([10]))
    Wx_plus_b_L2 = tf.matmul(L1,Weight_L2) + biases_L2
    prediction = tf.nn.softmax(Wx_plus_b_L2)

    # Quadratic cost function
    loss = tf.reduce_mean(tf.square(y-prediction))

    # Train with gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()

    # Store the results in a list of booleans
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a tensor

    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast booleans to float32: true->1, false->0

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(100):
            for batch in range(n_batch):
                # Get the images for each batch
                batch_xs,batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})

            acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
            print("Iter " + str(epoch) + ",Testing Accuracy "+ str(acc))

    2. Cross-Entropy, Overfitting, Dropout, and TensorFlow's Optimizers

    Cross-entropy
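      For classification, the cross-entropy cost usually trains faster than the quadratic cost, because its gradient does not vanish when the output unit saturates. For one-hot labels y and predicted probabilities p it averages -sum(y * log p) over the batch. A minimal NumPy sketch, for illustration only:

    import numpy as np

    def cross_entropy(labels, probs, eps=1e-12):
        # mean over the batch of -sum(y * log(p)); eps guards against log(0)
        return -np.mean(np.sum(labels * np.log(probs + eps), axis=1))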

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Replace the quadratic cost with cross-entropy
    # tf.nn.sigmoid_cross_entropy_with_logits() is the cross-entropy paired with sigmoid
    # tf.nn.softmax_cross_entropy_with_logits() is the cross-entropy paired with softmax
    # Load the dataset
    # one_hot encodes the labels as 0/1 vectors
    mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

    # Size of each batch
    batch_size = 100

    # Compute the total number of batches
    n_batch = mnist.train.num_examples // batch_size

    # Define two placeholders; each image is 28x28=784
    x = tf.placeholder(tf.float32,[None,784])
    y = tf.placeholder(tf.float32,[None,10])

    # Create a simple neural network
    Weight_L1 = tf.Variable(tf.zeros([784,10]))
    biases_L1 = tf.Variable(tf.zeros([10]))
    Wx_plus_b_L1 = tf.matmul(x,Weight_L1) + biases_L1
    prediction = tf.nn.softmax(Wx_plus_b_L1)

    # Quadratic cost function
    #loss = tf.reduce_mean(tf.square(y-prediction))

    # Cross-entropy
    # logits has shape [batch_size, num_classes]: the *unscaled* scores z going
    # into the network's final softmax, so Wx_plus_b_L1 is passed here, not prediction.
    # labels also has shape [batch_size, num_classes]: the desired output.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=Wx_plus_b_L1))
    # Train with gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()

    # Store the results in a list of booleans
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a tensor

    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast booleans to float32: true->1, false->0

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(21):
            for batch in range(n_batch):
                # Get the images for each batch
                batch_xs,batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})

            acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
            print("Iter " + str(epoch) + ",Testing Accuracy "+ str(acc))

      Dropout
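      Dropout fights overfitting by randomly deactivating a fraction of the neurons on each training step; tf.nn.dropout also scales the surviving activations by 1/keep_prob so their expected magnitude is unchanged. A minimal NumPy sketch of this "inverted dropout" behavior, for illustration only:

    import numpy as np

    def dropout(x, keep_prob):
        # keep each activation with probability keep_prob,
        # scaling survivors by 1/keep_prob to preserve the expected sum
        mask = (np.random.rand(*x.shape) < keep_prob) / keep_prob
        return x * mask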

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Load the dataset
    # one_hot encodes the labels as 0/1 vectors
    mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

    # Size of each batch
    batch_size = 100

    # Compute the total number of batches
    n_batch = mnist.train.num_examples // batch_size

    # Define two placeholders; each image is 28x28=784
    x = tf.placeholder(tf.float32,[None,784])
    y = tf.placeholder(tf.float32,[None,10])
    keep_prob = tf.placeholder(tf.float32) # dropout parameter: fraction of neurons kept active; 1 means all neurons are working

    # Create the network
    Weight_L1 = tf.Variable(tf.truncated_normal([784,2000],stddev=0.1))
    biases_L1 = tf.Variable(tf.zeros([2000])+0.1)
    Wx_plus_b_L1 = tf.nn.tanh(tf.matmul(x,Weight_L1) + biases_L1)
    L1_drop = tf.nn.dropout(Wx_plus_b_L1,keep_prob)

    Weight_L2 = tf.Variable(tf.truncated_normal([2000,2000],stddev=0.1))
    biases_L2 = tf.Variable(tf.zeros([2000])+0.1)
    Wx_plus_b_L2 = tf.nn.tanh(tf.matmul(L1_drop,Weight_L2) + biases_L2)
    L2_drop = tf.nn.dropout(Wx_plus_b_L2,keep_prob)

    Weight_L3 = tf.Variable(tf.truncated_normal([2000,1000],stddev=0.1))
    biases_L3 = tf.Variable(tf.zeros([1000])+0.1)
    Wx_plus_b_L3 = tf.nn.tanh(tf.matmul(L2_drop,Weight_L3) + biases_L3)
    L3_drop = tf.nn.dropout(Wx_plus_b_L3,keep_prob)

    Weight_L4 = tf.Variable(tf.truncated_normal([1000,10],stddev=0.1))
    biases_L4 = tf.Variable(tf.zeros([10])+0.1)
    # Keep the unscaled scores; softmax_cross_entropy_with_logits expects them
    logits = tf.matmul(L3_drop,Weight_L4) + biases_L4
    prediction = tf.nn.softmax(logits)

    # Quadratic cost function
    #loss = tf.reduce_mean(tf.square(y-prediction))

    # Cross-entropy, computed from the unscaled logits
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    # Train with gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()

    # Store the results in a list of booleans
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a tensor

    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast booleans to float32: true->1, false->0

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(21):
            for batch in range(n_batch):
                # Get the images for each batch
                batch_xs,batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.0})

            test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
            train_acc = sess.run(accuracy,feed_dict={x:mnist.train.images,y:mnist.train.labels,keep_prob:1.0})

            print("Iter " + str(epoch) + ",Testing Accuracy "+ str(test_acc)+", training Accuracy "+str(train_acc))
      Optimizers

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Load the dataset
    # one_hot encodes the labels as 0/1 vectors
    mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

    # Size of each batch
    batch_size = 100

    # Compute the total number of batches
    n_batch = mnist.train.num_examples // batch_size

    # Define two placeholders; each image is 28x28=784
    x = tf.placeholder(tf.float32,[None,784])
    y = tf.placeholder(tf.float32,[None,10])

    # Create a simple neural network
    Weight_L1 = tf.Variable(tf.zeros([784,10]))
    biases_L1 = tf.Variable(tf.zeros([10]))
    Wx_plus_b_L1 = tf.matmul(x,Weight_L1) + biases_L1
    prediction = tf.nn.softmax(Wx_plus_b_L1)

    # Quadratic cost function
    #loss = tf.reduce_mean(tf.square(y-prediction))

    # Cross-entropy, computed from the unscaled logits
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=Wx_plus_b_L1))

    # Train with gradient descent
    #train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    # Use a different optimizer
    train_step = tf.train.AdamOptimizer(1e-2).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()

    # Store the results in a list of booleans
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a tensor

    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast booleans to float32: true->1, false->0

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(21):
            for batch in range(n_batch):
                # Get the images for each batch
                batch_xs,batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})

            acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
            print("Iter " + str(epoch) + ",Testing Accuracy "+ str(acc))
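      Other standard TF 1.x optimizers in tf.train, such as MomentumOptimizer, AdagradOptimizer, RMSPropOptimizer, and AdadeltaOptimizer, can be dropped in the same way. For example (the learning rate and momentum values here are only illustrative defaults):

    # momentum-based SGD instead of Adam; 0.9 is a common momentum value
    train_step = tf.train.MomentumOptimizer(0.2, momentum=0.9).minimize(loss)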

      Further tuning the network to raise accuracy

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Load the dataset
    # one_hot encodes the labels as 0/1 vectors
    mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

    # Size of each batch
    batch_size = 100

    # Compute the total number of batches
    n_batch = mnist.train.num_examples // batch_size

    # Define two placeholders; each image is 28x28=784
    x = tf.placeholder(tf.float32,[None,784])
    y = tf.placeholder(tf.float32,[None,10])
    keep_prob = tf.placeholder(tf.float32) # dropout parameter: fraction of neurons kept active; 1 means all neurons are working
    # Learning rate as a variable, so it can be decayed during training
    lr = tf.Variable(0.001,dtype = tf.float32)

    # Create the network
    Weight_L1 = tf.Variable(tf.truncated_normal([784,500],stddev=0.1))
    biases_L1 = tf.Variable(tf.zeros([500])+0.1)
    Wx_plus_b_L1 = tf.nn.tanh(tf.matmul(x,Weight_L1) + biases_L1)
    L1_drop = tf.nn.dropout(Wx_plus_b_L1,keep_prob)

    Weight_L2 = tf.Variable(tf.truncated_normal([500,300],stddev=0.1))
    biases_L2 = tf.Variable(tf.zeros([300])+0.1)
    Wx_plus_b_L2 = tf.nn.tanh(tf.matmul(L1_drop,Weight_L2) + biases_L2)
    L2_drop = tf.nn.dropout(Wx_plus_b_L2,keep_prob)

    Weight_L3 = tf.Variable(tf.truncated_normal([300,10],stddev=0.1))
    biases_L3 = tf.Variable(tf.zeros([10])+0.1)
    # Keep the unscaled scores; softmax_cross_entropy_with_logits expects them
    logits = tf.matmul(L2_drop,Weight_L3) + biases_L3
    prediction = tf.nn.softmax(logits)

    # Quadratic cost function
    #loss = tf.reduce_mean(tf.square(y-prediction))

    # Cross-entropy, computed from the unscaled logits
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    # Train with the Adam optimizer, using the decaying learning rate
    train_step = tf.train.AdamOptimizer(lr).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()

    # Store the results in a list of booleans
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a tensor

    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast booleans to float32: true->1, false->0

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(51):
            # Decay the learning rate by 5% each epoch
            sess.run(tf.assign(lr,0.001 * (0.95 ** epoch)))
            for batch in range(n_batch):
                # Get the images for each batch
                batch_xs,batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.0})

            learning_rate = sess.run(lr)

            test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
            train_acc = sess.run(accuracy,feed_dict={x:mnist.train.images,y:mnist.train.labels,keep_prob:1.0})

            print("Iter " + str(epoch) + ",Testing Accuracy "+ str(test_acc)+", training Accuracy "+str(train_acc))
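      The schedule above updates lr by hand with tf.assign once per epoch. A hedged alternative sketch using the built-in tf.train.exponential_decay, which computes 0.001 * 0.95^(global_step/decay_steps) inside the graph (decay_steps=n_batch gives one decay step per epoch):

    global_step = tf.Variable(0, trainable=False)
    lr = tf.train.exponential_decay(0.001, global_step,
                                    decay_steps=n_batch, decay_rate=0.95, staircase=True)
    # passing global_step makes minimize() increment it on every training step
    train_step = tf.train.AdamOptimizer(lr).minimize(loss, global_step=global_step)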

    3. TensorBoard Visualization

        TensorBoard: visualizing the network graph

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Using the earlier MNIST program to demonstrate tensorboard
    # Load the dataset
    # one_hot encodes the labels as 0/1 vectors
    mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

    # Size of each batch
    batch_size = 100

    # Compute the total number of batches
    n_batch = mnist.train.num_examples // batch_size

    #***** To visualize the network structure, name scopes are required *****
    with tf.name_scope('input'):
        # Define two placeholders; each image is 28x28=784
        x = tf.placeholder(tf.float32,[None,784],name='x-input')
        y = tf.placeholder(tf.float32,[None,10],name='y-input')

    # Create a simple neural network
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            Weight_L1 = tf.Variable(tf.zeros([784,10]),name='W')
        with tf.name_scope('biases'):
            biases_L1 = tf.Variable(tf.zeros([10]),name='b')
        with tf.name_scope('wx_plus_b'):
            Wx_plus_b_L1 = tf.matmul(x,Weight_L1) + biases_L1
        with tf.name_scope('softmax'):
            prediction = tf.nn.softmax(Wx_plus_b_L1)

    # Quadratic cost function
    #loss = tf.reduce_mean(tf.square(y-prediction))
    with tf.name_scope('loss'):
        # Cross-entropy, computed from the unscaled logits
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=Wx_plus_b_L1))
    # Train with gradient descent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()

    # Store the results in a list of booleans
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a tensor
        with tf.name_scope('accuracy'):
            # Compute the accuracy
            accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast booleans to float32: true->1, false->0

    with tf.Session() as sess:
        sess.run(init)
        # Write the graph definition to the log directory
        writer = tf.summary.FileWriter('C:/Users/zgyxf183/Desktop/logs',sess.graph)
        for epoch in range(1):
            for batch in range(n_batch):
                # Get the images for each batch
                batch_xs,batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})

            acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
            print("Iter " + str(epoch) + ",Testing Accuracy "+ str(acc))
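      With the graph written out, start TensorBoard pointed at the same log directory, e.g. "tensorboard --logdir=C:/Users/zgyxf183/Desktop/logs", then open http://localhost:6006 in a browser and switch to the Graphs tab.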

      TensorBoard: tracking a training run

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Load the dataset
    mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

    # Size of each batch
    batch_size = 100
    # Compute the total number of batches
    n_batch = mnist.train.num_examples // batch_size

    # Summary statistics for a variable
    def variable_summaries(var):
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean) # mean
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev) # standard deviation
            tf.summary.scalar('max', tf.reduce_max(var)) # maximum
            tf.summary.scalar('min', tf.reduce_min(var)) # minimum
            tf.summary.histogram('histogram', var) # histogram

    # Name scopes
    with tf.name_scope('input'):
        # Define two placeholders
        x = tf.placeholder(tf.float32,[None,784],name='x-input')
        y = tf.placeholder(tf.float32,[None,10],name='y-input')

    with tf.name_scope('layer'):
        # Create a simple neural network
        with tf.name_scope('weights'):
            W = tf.Variable(tf.zeros([784,10]),name='W')
            variable_summaries(W)
        with tf.name_scope('biases'):
            b = tf.Variable(tf.zeros([10]),name='b')
            variable_summaries(b)
        with tf.name_scope('wx_plus_b'):
            wx_plus_b = tf.matmul(x,W) + b
        with tf.name_scope('softmax'):
            prediction = tf.nn.softmax(wx_plus_b)

    # Quadratic cost function
    # loss = tf.reduce_mean(tf.square(y-prediction))
    with tf.name_scope('loss'):
        # Cross-entropy, computed from the unscaled logits
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=wx_plus_b))
        tf.summary.scalar('loss',loss)
    with tf.name_scope('train'):
        # Train with gradient descent
        train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()

    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            # Store the results in a list of booleans
            correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a 1-D tensor
        with tf.name_scope('accuracy'):
            # Compute the accuracy
            accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
            tf.summary.scalar('accuracy',accuracy)

    # Merge all the summaries
    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(init)
        writer = tf.summary.FileWriter('logs/',sess.graph)
        for epoch in range(51):
            for batch in range(n_batch):
                batch_xs,batch_ys = mnist.train.next_batch(batch_size)
                summary,_ = sess.run([merged,train_step],feed_dict={x:batch_xs,y:batch_ys})

            writer.add_summary(summary,epoch)
            acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
            print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))

      TensorBoard: embedding visualization

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data
    from tensorflow.contrib.tensorboard.plugins import projector

    # Load the dataset
    mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
    # Number of training steps
    max_steps = 1001
    # Number of images to embed
    image_num = 3000
    # File path
    DIR = "C:/Users/zgyxf183/Documents/jupyter/tensorFlow Learning/"

    # Define a session
    sess = tf.Session()

    # Load the images to embed
    embedding = tf.Variable(tf.stack(mnist.test.images[:image_num]), trainable=False, name='embedding')

    # Summary statistics for a variable
    def variable_summaries(var):
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean) # mean
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev) # standard deviation
            tf.summary.scalar('max', tf.reduce_max(var)) # maximum
            tf.summary.scalar('min', tf.reduce_min(var)) # minimum
            tf.summary.histogram('histogram', var) # histogram

    # Name scopes
    with tf.name_scope('input'):
        # None here means the first dimension can have any length
        x = tf.placeholder(tf.float32,[None,784],name='x-input')
        # The correct labels
        y = tf.placeholder(tf.float32,[None,10],name='y-input')

    # Show the input images
    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        tf.summary.image('input', image_shaped_input, 10)

    with tf.name_scope('layer'):
        # Create a simple neural network
        with tf.name_scope('weights'):
            W = tf.Variable(tf.zeros([784,10]),name='W')
            variable_summaries(W)
        with tf.name_scope('biases'):
            b = tf.Variable(tf.zeros([10]),name='b')
            variable_summaries(b)
        with tf.name_scope('wx_plus_b'):
            wx_plus_b = tf.matmul(x,W) + b
        with tf.name_scope('softmax'):
            prediction = tf.nn.softmax(wx_plus_b)

    with tf.name_scope('loss'):
        # Cross-entropy cost, computed from the unscaled logits
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=wx_plus_b))
        tf.summary.scalar('loss',loss)
    with tf.name_scope('train'):
        # Train with gradient descent
        train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    # Initialize variables
    sess.run(tf.global_variables_initializer())

    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            # Store the results in a list of booleans
            correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a 1-D tensor
        with tf.name_scope('accuracy'):
            # Compute the accuracy
            accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast correct_prediction to float32
            tf.summary.scalar('accuracy',accuracy)

    # Generate the metadata file (one label per embedded image)
    if tf.gfile.Exists(DIR + 'projector/projector/metadata.tsv'):
        tf.gfile.DeleteRecursively(DIR + 'projector/projector/metadata.tsv')
    with open(DIR + 'projector/projector/metadata.tsv', 'w') as f:
        labels = sess.run(tf.argmax(mnist.test.labels[:],1))
        for i in range(image_num):
            f.write(str(labels[i]) + '\n')

    # Merge all the summaries
    merged = tf.summary.merge_all()

    projector_writer = tf.summary.FileWriter(DIR + 'projector/projector',sess.graph)
    saver = tf.train.Saver()
    config = projector.ProjectorConfig()
    embed = config.embeddings.add()
    embed.tensor_name = embedding.name
    embed.metadata_path = DIR + 'projector/projector/metadata.tsv'
    embed.sprite.image_path = DIR + 'projector/data/mnist_10k_sprite.png'
    embed.sprite.single_image_dim.extend([28,28])
    projector.visualize_embeddings(projector_writer,config)

    for i in range(max_steps):
        # 100 samples per batch
        batch_xs,batch_ys = mnist.train.next_batch(100)
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        summary,_ = sess.run([merged,train_step],feed_dict={x:batch_xs,y:batch_ys},options=run_options,run_metadata=run_metadata)
        projector_writer.add_run_metadata(run_metadata, 'step%03d' % i)
        projector_writer.add_summary(summary, i)

        if i%100 == 0:
            acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
            print ("Iter " + str(i) + ", Testing Accuracy= " + str(acc))

    saver.save(sess, DIR + 'projector/projector/a_model.ckpt', global_step=max_steps)
    projector_writer.close()
    sess.close()

    4. Convolutional Neural Networks

    Applying a convolutional network to MNIST classification

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Load the dataset
    # one_hot encodes the labels as 0/1 vectors
    mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

    # Size of each batch
    batch_size = 100

    # Compute the total number of batches
    n_batch = mnist.train.num_examples // batch_size

    # Initialize weights
    def weight_variable(shape):
        initial = tf.truncated_normal(shape,stddev=0.1) # sample from a truncated normal distribution
        return tf.Variable(initial)

    # Initialize biases
    def bias_variable(shape):
        initial = tf.constant(0.1,shape=shape)
        return tf.Variable(initial)

    # Define the convolutional layer
    def conv2d(x,W):
        return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')

    # Define the pooling layer
    def max_pool_2x2(x):
        return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

    # Define two placeholders; each image is 28x28=784
    x = tf.placeholder(tf.float32,[None,784])
    y = tf.placeholder(tf.float32,[None,10])

    # Reshape x into a 4-D tensor [batch, in_height, in_width, in_channels]
    # The input arrives as a flat row of 784 values and must become 28x28
    x_image = tf.reshape(x,[-1,28,28,1])

    # Initialize the weights and biases of the first conv layer
    W_conv1 = weight_variable([5,5,1,32]) # 5x5 window, 32 kernels extracting features from 1 input plane
    b_conv1 = bias_variable([32]) # one bias per kernel

    # Convolve x_image with the weights, add the bias, then apply relu
    h_conv1 = tf.nn.relu(conv2d(x_image,W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # Initialize the weights and biases of the second conv layer
    W_conv2 = weight_variable([5,5,32,64]) # 5x5 window, 64 kernels extracting features from 32 planes
    b_conv2 = bias_variable([64]) # one bias per kernel

    # Convolve h_pool1 with the weights, add the bias, then apply relu
    h_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2) # max-pooling

    # A 28x28 image stays 28x28 after the first convolution and becomes 14x14 after the first pooling
    # After the second convolution it is still 14x14; the second pooling brings it to 7x7
    # The operations above therefore yield 64 planes of size 7x7

    # Initialize the weights of the first fully connected layer
    W_fc1 = weight_variable([7*7*64,1024]) # the previous layer has 7*7*64 neurons; this layer has 1024
    b_fc1 = bias_variable([1024]) # 1024 nodes

    # Flatten the output of pooling layer 2 into one dimension
    h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])

    # Compute the output of the first fully connected layer
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1) + b_fc1)

    # keep_prob is the probability that a neuron's output is kept
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)

    # Initialize the second fully connected layer
    W_fc2 = weight_variable([1024,10])
    b_fc2 = bias_variable([10])

    # Compute the output; keep the unscaled logits for the loss
    logits = tf.matmul(h_fc1_drop,W_fc2) + b_fc2
    prediction = tf.nn.softmax(logits)

    # Quadratic cost function
    #loss = tf.reduce_mean(tf.square(y-prediction))

    # Cross-entropy, computed from the unscaled logits
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    # Train with gradient descent
    #train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    # Use the Adam optimizer instead
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()

    # Store the results in a list of booleans
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a tensor

    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast booleans to float32: true->1, false->0

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(51):

            for batch in range(n_batch):
                # Get the images for each batch
                batch_xs,batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.7})

            test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
            train_acc = sess.run(accuracy,feed_dict={x:mnist.train.images,y:mnist.train.labels,keep_prob:1.0})

            print("Iter " + str(epoch) + ",Testing Accuracy "+ str(test_acc)+", training Accuracy "+str(train_acc))
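      The 28 -> 14 -> 7 sizes follow from how 'SAME' padding works: the output size is ceil(input_size / stride). A quick check of the flatten size used above:

    import math

    def same_out(in_size, stride):
        # with 'SAME' padding TensorFlow pads so that out = ceil(in / stride)
        return math.ceil(in_size / stride)

    # conv with stride 1 keeps 28; each 2x2 pool with stride 2 halves it
    print(same_out(28, 1), same_out(28, 2), same_out(14, 2))  # 28 14 7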

      Visualizing the convolutional network

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    mnist = input_data.read_data_sets('MNIST_data',one_hot=True)

    # Size of each batch
    batch_size = 100
    # Compute the total number of batches
    n_batch = mnist.train.num_examples // batch_size

    # Summary statistics for a variable
    def variable_summaries(var):
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean) # mean
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev) # standard deviation
            tf.summary.scalar('max', tf.reduce_max(var)) # maximum
            tf.summary.scalar('min', tf.reduce_min(var)) # minimum
            tf.summary.histogram('histogram', var) # histogram

    # Initialize weights
    def weight_variable(shape,name):
        initial = tf.truncated_normal(shape,stddev=0.1) # sample from a truncated normal distribution
        return tf.Variable(initial,name=name)

    # Initialize biases
    def bias_variable(shape,name):
        initial = tf.constant(0.1,shape=shape)
        return tf.Variable(initial,name=name)

    # Convolutional layer
    def conv2d(x,W):
        # x: input tensor of shape [batch, in_height, in_width, in_channels]
        # W: filter / kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]
        # strides[0] = strides[3] = 1; strides[1] is the x stride, strides[2] the y stride
        # padding: a string, either "SAME" or "VALID"
        return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')

    # Pooling layer
    def max_pool_2x2(x):
        # ksize [1,x,y,1]
        return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

    # Name scopes
    with tf.name_scope('input'):
        # Define two placeholders
        x = tf.placeholder(tf.float32,[None,784],name='x-input')
        y = tf.placeholder(tf.float32,[None,10],name='y-input')
        with tf.name_scope('x_image'):
            # Reshape x into a 4-D tensor [batch, in_height, in_width, in_channels]
            x_image = tf.reshape(x,[-1,28,28,1],name='x_image')

    with tf.name_scope('Conv1'):
        # Initialize the weights and biases of the first conv layer
        with tf.name_scope('W_conv1'):
            W_conv1 = weight_variable([5,5,1,32],name='W_conv1') # 5x5 window, 32 kernels extracting features from 1 plane
        with tf.name_scope('b_conv1'):
            b_conv1 = bias_variable([32],name='b_conv1') # one bias per kernel

        # Convolve x_image with the weights, add the bias, then apply relu
        with tf.name_scope('conv2d_1'):
            conv2d_1 = conv2d(x_image,W_conv1) + b_conv1
        with tf.name_scope('relu'):
            h_conv1 = tf.nn.relu(conv2d_1)
        with tf.name_scope('h_pool1'):
            h_pool1 = max_pool_2x2(h_conv1) # max-pooling

    with tf.name_scope('Conv2'):
        # Initialize the weights and biases of the second conv layer
        with tf.name_scope('W_conv2'):
            W_conv2 = weight_variable([5,5,32,64],name='W_conv2') # 5x5 window, 64 kernels extracting features from 32 planes
        with tf.name_scope('b_conv2'):
            b_conv2 = bias_variable([64],name='b_conv2') # one bias per kernel

        # Convolve h_pool1 with the weights, add the bias, then apply relu
        with tf.name_scope('conv2d_2'):
            conv2d_2 = conv2d(h_pool1,W_conv2) + b_conv2
        with tf.name_scope('relu'):
            h_conv2 = tf.nn.relu(conv2d_2)
        with tf.name_scope('h_pool2'):
            h_pool2 = max_pool_2x2(h_conv2) # max-pooling

    # A 28x28 image stays 28x28 after the first convolution and becomes 14x14 after the first pooling
    # After the second convolution it is still 14x14; the second pooling brings it to 7x7
    # The operations above therefore yield 64 planes of size 7x7

    with tf.name_scope('fc1'):
        # Initialize the weights of the first fully connected layer
        with tf.name_scope('W_fc1'):
            W_fc1 = weight_variable([7*7*64,1024],name='W_fc1') # the previous layer has 7*7*64 neurons; this layer has 1024
        with tf.name_scope('b_fc1'):
            b_fc1 = bias_variable([1024],name='b_fc1') # 1024 nodes

        # Flatten the output of pooling layer 2 into one dimension
        with tf.name_scope('h_pool2_flat'):
            h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64],name='h_pool2_flat')
        # Compute the output of the first fully connected layer
        with tf.name_scope('wx_plus_b1'):
            wx_plus_b1 = tf.matmul(h_pool2_flat,W_fc1) + b_fc1
        with tf.name_scope('relu'):
            h_fc1 = tf.nn.relu(wx_plus_b1)

        # keep_prob is the probability that a neuron's output is kept
        with tf.name_scope('keep_prob'):
            keep_prob = tf.placeholder(tf.float32,name='keep_prob')
        with tf.name_scope('h_fc1_drop'):
            h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob,name='h_fc1_drop')

    with tf.name_scope('fc2'):
        # Initialize the second fully connected layer
        with tf.name_scope('W_fc2'):
            W_fc2 = weight_variable([1024,10],name='W_fc2')
        with tf.name_scope('b_fc2'):
            b_fc2 = bias_variable([10],name='b_fc2')
        with tf.name_scope('wx_plus_b2'):
            wx_plus_b2 = tf.matmul(h_fc1_drop,W_fc2) + b_fc2
        with tf.name_scope('softmax'):
            # Compute the output
            prediction = tf.nn.softmax(wx_plus_b2)

    # Cross-entropy cost, computed from the unscaled logits
    with tf.name_scope('cross_entropy'):
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=wx_plus_b2),name='cross_entropy')
        tf.summary.scalar('cross_entropy',cross_entropy)

    # Optimize with AdamOptimizer
    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    # Compute the accuracy
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            # Store the results in a list of booleans
            correct_prediction = tf.equal(tf.argmax(prediction,1),tf.argmax(y,1)) # argmax returns the index of the largest value in a 1-D tensor
        with tf.name_scope('accuracy'):
            # Compute the accuracy
            accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
            tf.summary.scalar('accuracy',accuracy)

    # Merge all the summaries
    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        train_writer = tf.summary.FileWriter('logs/train',sess.graph)
        test_writer = tf.summary.FileWriter('logs/test',sess.graph)
        for i in range(1001):
            # Train the model
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.5})
            # Record summaries computed on the training batch
            summary = sess.run(merged,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.0})
            train_writer.add_summary(summary,i)
            # Record summaries computed on a test batch
            batch_xs,batch_ys = mnist.test.next_batch(batch_size)
            summary = sess.run(merged,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.0})
            test_writer.add_summary(summary,i)

            if i%100==0:
                test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
                train_acc = sess.run(accuracy,feed_dict={x:mnist.train.images[:10000],y:mnist.train.labels[:10000],keep_prob:1.0})
                print ("Iter " + str(i) + ", Testing Accuracy= " + str(test_acc) + ", Training Accuracy= " + str(train_acc))

      LSTM

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Load the dataset
    mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)

    # The input images are 28*28
    n_inputs = 28 # one row of 28 pixels is fed per time step
    max_time = 28 # 28 rows in total
    lstm_size = 100 # number of hidden units
    n_classes = 10 # 10 classes
    batch_size = 50 # 50 samples per batch
    n_batch = mnist.train.num_examples // batch_size # compute the total number of batches

    # None here means the first dimension can have any length
    x = tf.placeholder(tf.float32,[None,784])
    # The correct labels
    y = tf.placeholder(tf.float32,[None,10])

    # Initialize weights
    weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
    # Initialize biases
    biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))

    # Define the RNN network
    def RNN(X,weights,biases):
        # inputs has shape [batch_size, max_time, n_inputs]
        inputs = tf.reshape(X,[-1,max_time,n_inputs])
        # Define the basic LSTM cell
        # (in some older TF versions this class lived under tf.contrib.rnn)
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
        # final_state is a (cell_state, hidden_state) pair, each [batch_size, cell.state_size]:
        #   final_state[0] is the cell state
        #   final_state[1] is the hidden state
        # outputs: the RNN output Tensor.
        #   If time_major == False (default), shaped [batch_size, max_time, cell.output_size].
        #   If time_major == True, shaped [max_time, batch_size, cell.output_size].
        outputs,final_state = tf.nn.dynamic_rnn(lstm_cell,inputs,dtype=tf.float32)
        # Return the unscaled logits; the loss below applies the softmax itself
        results = tf.matmul(final_state[1],weights) + biases
        return results

    # Compute the RNN's output logits and predicted probabilities
    logits = RNN(x, weights, biases)
    prediction = tf.nn.softmax(logits)
    # Loss function
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=y))
    # Optimize with AdamOptimizer
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    # Store the results in a list of booleans
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value in a 1-D tensor
    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast correct_prediction to float32
    # Initialize
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(6):
            for batch in range(n_batch):
                batch_xs,batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})

            acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
            print ("Iter " + str(epoch) + ", Testing Accuracy= " + str(acc))
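      A note on final_state[1]: for a BasicLSTMCell with time_major=False and no sequence padding, the hidden state in final_state[1] equals the output at the last time step, so inside RNN() the readout could equivalently be written as:

    last_output = outputs[:, -1, :]                     # [batch_size, lstm_size]
    results = tf.matmul(last_output, weights) + biases  # same logits as above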
  • Original article: https://www.cnblogs.com/kang06/p/9373600.html