import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        "weights", shape,
        initializer=tf.truncated_normal_initializer(stddev=0.1))
    # If a regularizer is given, add the regularization loss for these weights
    # to the 'losses' collection so it can be summed into the total loss.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

def inference(input_tensor, regularizer):
    # Hidden layer: 784 -> 500 with ReLU activation.
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    # Output layer: 500 -> 10 logits (no activation here; softmax is applied
    # inside the loss function).
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2
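# A minimal sketch (not part of the original script) of why inference() creates
# its variables with tf.get_variable inside variable scopes: an evaluation
# graph can rebuild the same network and share the trained weights by reopening
# the scopes with reuse=True. The helper name 'eval_inference' is hypothetical.
def eval_inference(input_tensor):
    # Reopen the existing variable scopes so tf.get_variable returns the
    # already-created 'layer1'/'layer2' weights instead of allocating new ones.
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        return inference(input_tensor, None)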
# 1. Define the hyperparameters of the neural network.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 3000
MOVING_AVERAGE_DECAY = 0.99
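# A minimal sketch, not part of the original script: with staircase=True,
# tf.train.exponential_decay (used in train() below) computes
#     lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // decay_steps)
# The helper name 'decayed_learning_rate' is hypothetical; decay_steps is
# assumed to be mnist.train.num_examples / BATCH_SIZE, i.e. one epoch.
def decayed_learning_rate(global_step, decay_steps):
    # Mirrors the staircase variant: the exponent only increases once per epoch.
    return LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // decay_steps)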
# 2. Define the training process and save the TensorBoard log file.
def train(mnist):
    # Name scope for the input data.
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Name scope for the moving averages.
    with tf.name_scope("moving_average"):
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # Name scope for computing the loss function.
    with tf.name_scope("loss_function"):
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y, labels=tf.argmax(y_, 1))
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        # Total loss = cross entropy + the L2 terms collected in 'losses'.
        loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # Name scope for the learning rate, the optimizer, and the per-step
    # training operation.
    with tf.name_scope("train_step"):
        learning_rate = tf.train.exponential_decay(
            LEARNING_RATE_BASE,
            global_step,
            mnist.train.num_examples / BATCH_SIZE,
            LEARNING_RATE_DECAY,
            staircase=True)
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
            loss, global_step=global_step)
        # Group the gradient update and the moving-average update into one op.
        with tf.control_dependencies([train_step, variables_averages_op]):
            train_op = tf.no_op(name='train')

    # Use a raw string for the Windows path so "\t" is not parsed as a tab.
    writer = tf.summary.FileWriter(r"F:\temp\log", tf.get_default_graph())

    # Train the model.
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            if i % 1000 == 0:
                # Configure which runtime information to trace.
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                # Proto that receives the runtime trace.
                run_metadata = tf.RunMetadata()
                _, loss_value, step = sess.run(
                    [train_op, loss, global_step],
                    feed_dict={x: xs, y_: ys},
                    options=run_options,
                    run_metadata=run_metadata)
                writer.add_run_metadata(
                    run_metadata=run_metadata, tag=("tag%d" % i), global_step=i)
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
            else:
                _, loss_value, step = sess.run(
                    [train_op, loss, global_step], feed_dict={x: xs, y_: ys})

    writer.close()
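# Usage note (not part of the original script): after training, the graph and
# the per-step run metadata recorded above can be inspected by pointing
# TensorBoard at the log directory used in train(), e.g.
#
#     tensorboard --logdir=F:\temp\log
#
# In the GRAPHS tab, each recorded "tag%d" run can be selected to color nodes
# by compute time or memory usage.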
# 3. Main function.
def main(argv=None):
    # Use a raw string so backslash sequences in the Windows path (e.g. "\201")
    # are not parsed as escape characters.
    mnist = input_data.read_data_sets(
        r"F:\TensorFlowGoogle\201806-github\datasets\MNIST_data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    main()