A neural network is, at its core, a forward-propagation process; our goal is to find the parameters that minimize the value of the cost function.
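As a rough NumPy sketch (the layer sizes mirror the 784→256→128→10 network built later; the weight names W1/W2/W_out are made up for illustration), forward propagation is just repeated matrix multiplication followed by a nonlinearity:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Hypothetical parameters matching the network built below: 784 -> 256 -> 128 -> 10
W1, b1 = np.random.randn(784, 256) * 0.1, np.zeros(256)
W2, b2 = np.random.randn(256, 128) * 0.1, np.zeros(128)
W_out, b_out = np.random.randn(128, 10) * 0.1, np.zeros(10)

def forward(X):
    h1 = sigmoid(X @ W1 + b1)    # first hidden layer
    h2 = sigmoid(h1 @ W2 + b2)   # second hidden layer
    return h2 @ W_out + b_out    # output logits (softmax is applied inside the loss)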
The dataset is MNIST: the training set holds 55,000 images and the test set 10,000, each a 28×28 grayscale image (stored flattened as a 784-dimensional vector). The labels have shape 55,000×10: a one-hot encoding over the 10 classes, one label per image.
Step 1: Import the data
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('data/', one_hot=True)
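To verify the shapes described above, the loaded Dataset objects can be inspected directly (the tutorial loader keeps 5,000 images aside as a validation set, so the training set holds 55,000 examples):

print(mnist.train.images.shape)   # (55000, 784) -- 28*28 pixels, flattened
print(mnist.train.labels.shape)   # (55000, 10)  -- one-hot labels
print(mnist.test.images.shape)    # (10000, 784)
print(mnist.test.labels.shape)    # (10000, 10)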
Step 2: Initialize the parameters
n_hidden_1 = 256
n_hidden_2 = 128
n_input = 784
n_classes = 10

x = tf.placeholder('float', [None, n_input])
y = tf.placeholder('float', [None, n_classes])

stddev = 0.1
# Initialize the weights w; stddev=stddev gives a standard deviation of 0.1
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=stddev))
}
# Initialize the biases b
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
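A fixed stddev of 0.1 works for a network this small. As an alternative sketch (not what the code above does), a Xavier-style initialization scales the standard deviation with the layer sizes:

# Sketch of Xavier/Glorot initialization as an alternative to a fixed stddev;
# fan_in and fan_out are the layer's input and output sizes.
def xavier_stddev(fan_in, fan_out):
    return (2.0 / (fan_in + fan_out)) ** 0.5

w1_xavier = tf.Variable(
    tf.random_normal([n_input, n_hidden_1], stddev=xavier_stddev(n_input, n_hidden_1)))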
Step 3: Build the forward-propagation function, the cost function, and the optimizer
# Forward propagation through the network
def multilayer_perceptron(_X, _weights, _biases):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))
    return tf.matmul(layer_2, _weights['out']) + _biases['out']

# Forward pass
pred = multilayer_perceptron(x, weights, biases)
# Loss: softmax cross-entropy between the logits and the one-hot labels
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))
# Optimizer: gradient descent to minimize the loss
optm = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
# Prediction accuracy
corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(corr, 'float'))
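Note that softmax_cross_entropy_with_logits_v2 applies the softmax internally, which is why multilayer_perceptron returns raw logits rather than probabilities. For illustration only, the same loss can be sketched by hand (the clipping guards against log(0)):

# Manual equivalent of the softmax cross-entropy loss above (illustration only)
probs = tf.nn.softmax(pred)
manual_cost = tf.reduce_mean(
    -tf.reduce_sum(y * tf.log(tf.clip_by_value(probs, 1e-10, 1.0)), axis=1))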
Step 4: Iteratively optimize the parameters
# Initialize all variables
init = tf.global_variables_initializer()
# Number of training epochs
train_epochs = 50
# Samples drawn per batch
batch_size = 100
# Print results every 5 epochs
display_step = 5

sess = tf.Session()
sess.run(init)
for train_epoch in range(train_epochs):
    avg_cost = 0
    # Number of batches per epoch when drawing 100 samples at a time
    num_batch = int(mnist.train.num_examples / batch_size)
    for i in range(num_batch):
        # Fetch the next batch
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run one optimization step on the cost
        sess.run(optm, feed_dict={x: batch_x, y: batch_y})
        # Accumulate the average cost
        feeds = {x: batch_x, y: batch_y}
        avg_cost += sess.run(cost, feed_dict=feeds) / num_batch
    # Print the accuracy every 5 epochs
    if (train_epoch + 1) % display_step == 0:
        feeds_train = {x: batch_x, y: batch_y}
        feeds_test = {x: mnist.test.images, y: mnist.test.labels}
        # Accuracy on the last training batch
        train_acc = sess.run(accr, feed_dict=feeds_train)
        # Accuracy on the test set
        test_acc = sess.run(accr, feed_dict=feeds_test)
        print("Epoch: %03d/%03d cost: %.9f train_acc: %.3f test_acc: %.3f"
              % (train_epoch + 1, train_epochs, avg_cost, train_acc, test_acc))
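Once training ends, the same graph can be reused for inference. A minimal sketch, assuming the session above is still open:

# Predict the classes of the first 5 test images with the trained network
logits = sess.run(pred, feed_dict={x: mnist.test.images[:5]})
predicted = np.argmax(logits, axis=1)
true_labels = np.argmax(mnist.test.labels[:5], axis=1)
print('predicted:', predicted)
print('true:     ', true_labels)
sess.close()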