  • Running the TensorFlow MNIST convolutional example in Wing IDE

#
#     http://www.cnblogs.com/mydebug/
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import sys
sys.path.append("path to numpy goes here")  # Hint: without this line, "import numpy" below fails.
import tensorflow.python.platform

import numpy
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = '/TensorFlow/data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000  # Size of the validation set.
SEED = 66478  # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10


tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
FLAGS = tf.app.flags.FLAGS


def maybe_download(filename):
  """Download the data from Yann's website, unless it's already here."""
  if not os.path.exists(WORK_DIRECTORY):
    os.mkdir(WORK_DIRECTORY)
  filepath = os.path.join(WORK_DIRECTORY, filename)
  if not os.path.exists(filepath):
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  return filepath


def extract_data(filename, num_images):
  """Extract the images into a 4D tensor [image index, y, x, channels].

  Values are rescaled from [0, 255] down to [-0.5, 0.5].
  """
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    bytestream.read(16)
    buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
    data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
    data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
    return data


def extract_labels(filename, num_images):
  """Extract the labels into a 1-hot matrix [image index, label index]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    bytestream.read(8)
    buf = bytestream.read(1 * num_images)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
  # Convert to dense 1-hot representation.
  return (numpy.arange(NUM_LABELS) == labels[:, None]).astype(numpy.float32)


def fake_data(num_images):
  """Generate a fake dataset that matches the dimensions of MNIST."""
  data = numpy.ndarray(
      shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
      dtype=numpy.float32)
  labels = numpy.zeros(shape=(num_images, NUM_LABELS), dtype=numpy.float32)
  for image in xrange(num_images):
    label = image % 2
    data[image, :, :, 0] = label - 0.5
    labels[image, label] = 1.0
  return data, labels


def error_rate(predictions, labels):
  """Return the error rate based on dense predictions and 1-hot labels."""
  return 100.0 - (
      100.0 *
      numpy.sum(numpy.argmax(predictions, 1) == numpy.argmax(labels, 1)) /
      predictions.shape[0])


def main(argv=None):  # pylint: disable=unused-argument
  if FLAGS.self_test:
    print('Running self-test.')
    train_data, train_labels = fake_data(256)
    validation_data, validation_labels = fake_data(16)
    test_data, test_labels = fake_data(256)
    num_epochs = 1
  else:
    # Get the data.
    train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
    train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
    test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
    test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')

    # Extract it into numpy arrays.
    train_data = extract_data(train_data_filename, 60000)
    train_labels = extract_labels(train_labels_filename, 60000)
    test_data = extract_data(test_data_filename, 10000)
    test_labels = extract_labels(test_labels_filename, 10000)

    # Generate a validation set.
    validation_data = train_data[:VALIDATION_SIZE, :, :, :]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_data = train_data[VALIDATION_SIZE:, :, :, :]
    train_labels = train_labels[VALIDATION_SIZE:]
    num_epochs = NUM_EPOCHS
  train_size = train_labels.shape[0]

  # This is where training samples and labels are fed to the graph.
  # These placeholder nodes will be fed a batch of training data at each
  # training step using the {feed_dict} argument to the Run() call below.
  train_data_node = tf.placeholder(
      tf.float32,
      shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
  train_labels_node = tf.placeholder(tf.float32,
                                     shape=(BATCH_SIZE, NUM_LABELS))
  # For the validation and test data, we'll just hold the entire dataset in
  # one constant node.
  validation_data_node = tf.constant(validation_data)
  test_data_node = tf.constant(test_data)

  # The variables below hold all the trainable weights. They are passed an
  # initial value which will be assigned when we call:
  # {tf.initialize_all_variables().run()}
  conv1_weights = tf.Variable(
      tf.truncated_normal([5, 5, NUM_CHANNELS, 32],  # 5x5 filter, depth 32.
                          stddev=0.1,
                          seed=SEED))
  conv1_biases = tf.Variable(tf.zeros([32]))
  conv2_weights = tf.Variable(
      tf.truncated_normal([5, 5, 32, 64],
                          stddev=0.1,
                          seed=SEED))
  conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
  fc1_weights = tf.Variable(  # fully connected, depth 512.
      tf.truncated_normal(
          [IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
          stddev=0.1,
          seed=SEED))
  fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
  fc2_weights = tf.Variable(
      tf.truncated_normal([512, NUM_LABELS],
                          stddev=0.1,
                          seed=SEED))
  fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))

  # We will replicate the model structure for the training subgraph, as well
  # as the evaluation subgraphs, while sharing the trainable parameters.
  def model(data, train=False):
    """The Model definition."""
    # 2D convolution, with 'SAME' padding (i.e. the output feature map has
    # the same size as the input). Note that {strides} is a 4D array whose
    # shape matches the data layout: [image index, y, x, depth].
    conv = tf.nn.conv2d(data,
                        conv1_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    # Bias and rectified linear non-linearity.
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
    # Max pooling. The kernel size spec {ksize} also follows the layout of
    # the data. Here we have a pooling window of 2, and a stride of 2.
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    conv = tf.nn.conv2d(pool,
                        conv2_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    # Reshape the feature map cuboid into a 2D matrix to feed it to the
    # fully connected layers.
    pool_shape = pool.get_shape().as_list()
    reshape = tf.reshape(
        pool,
        [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
    # Fully connected layer. Note that the '+' operation automatically
    # broadcasts the biases.
    hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
    # Add a 50% dropout during training only. Dropout also scales
    # activations such that no rescaling is needed at evaluation time.
    if train:
      hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
    return tf.matmul(hidden, fc2_weights) + fc2_biases

  # Training computation: logits + cross-entropy loss.
  logits = model(train_data_node, True)
  loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
      logits, train_labels_node))

  # L2 regularization for the fully connected parameters.
  regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                  tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
  # Add the regularization term to the loss.
  loss += 5e-4 * regularizers

  # Optimizer: set up a variable that's incremented once per batch and
  # controls the learning rate decay.
  batch = tf.Variable(0)
  # Decay once per epoch, using an exponential schedule starting at 0.01.
  learning_rate = tf.train.exponential_decay(
      0.01,                # Base learning rate.
      batch * BATCH_SIZE,  # Current index into the dataset.
      train_size,          # Decay step.
      0.95,                # Decay rate.
      staircase=True)
  # Use simple momentum for the optimization.
  optimizer = tf.train.MomentumOptimizer(learning_rate,
                                         0.9).minimize(loss,
                                                       global_step=batch)

  # Predictions for the minibatch, validation set and test set.
  train_prediction = tf.nn.softmax(logits)
  # We'll compute them only once in a while by calling their {eval()} method.
  validation_prediction = tf.nn.softmax(model(validation_data_node))
  test_prediction = tf.nn.softmax(model(test_data_node))

  # Create a local session to run this computation.
  with tf.Session() as s:
    # Run all the initializers to prepare the trainable parameters.
    tf.initialize_all_variables().run()
    print('Initialized!')
    # Loop through training steps.
    for step in xrange(num_epochs * train_size // BATCH_SIZE):
      # Compute the offset of the current minibatch in the data.
      # Note that we could use better randomization across epochs.
      offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
      batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :]
      batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
      # This dictionary maps the batch data (as a numpy array) to the
      # node in the graph it should be fed to.
      feed_dict = {train_data_node: batch_data,
                   train_labels_node: batch_labels}
      # Run the graph and fetch some of the nodes.
      _, l, lr, predictions = s.run(
          [optimizer, loss, learning_rate, train_prediction],
          feed_dict=feed_dict)
      if step % 100 == 0:
        print('Epoch %.2f' % (float(step) * BATCH_SIZE / train_size))
        print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
        print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
        print('Validation error: %.1f%%' %
              error_rate(validation_prediction.eval(), validation_labels))
        sys.stdout.flush()
    # Finally print the result!
    test_error = error_rate(test_prediction.eval(), test_labels)
    print('Test error: %.1f%%' % test_error)
    if FLAGS.self_test:
      print('test_error', test_error)
      assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
          test_error,)


if __name__ == '__main__':
  tf.app.run()

    Create a new Python file in Wing IDE, paste the code above, and click Debug; it runs successfully.
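    Before debugging, it can help to confirm that the Python interpreter Wing IDE is configured to use can actually import the two packages this script needs. A minimal, purely illustrative check (run it as its own file or in the IDE's Python shell):

# Sanity check (illustrative): confirm numpy and TensorFlow are importable
# in the interpreter Wing IDE is pointed at.
import numpy
import tensorflow as tf
print('numpy version:', numpy.version.version)
print('tensorflow imported from:', tf.__file__)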

    Note: pay attention to the hint embedded in the source code (the sys.path.append line near the top).
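    If "import numpy" works in a normal terminal session but fails inside Wing IDE, the hint's sys.path.append call needs the directory numpy is installed in. A small sketch for finding that directory, assuming numpy already imports in the terminal (the two dirname calls strip ".../numpy/__init__.py" back to the enclosing site-packages directory):

# Print the directory to paste into sys.path.append(...) in the script above.
# Run this where "import numpy" already succeeds (e.g. a terminal session).
import os
import numpy
print(os.path.dirname(os.path.dirname(numpy.__file__)))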

  • Original post: https://www.cnblogs.com/mydebug/p/5036625.html