NO.3: Teaching Myself TensorFlow: MNIST Recognition and Extending the Neural Network

    Introduction

      I have recently been teaching myself GRU networks, and it really is not easy. To get programs to run faster, I installed the GPU build of TensorFlow on my aging laptop (GT650M) and, while I was at it, upgraded to tensorflow-gpu 1.7; all my earlier code is still compatible and runs fine. With the May Day holiday leaving me alone to rot in the lab, I threw together a BP (backpropagation) neural network program for MNIST handwritten digit recognition. It is fairly simple, and I may expand this post later, so consider it a rough draft.

    Main Text

    Preparing the MNIST Data

      MNIST handwritten digit recognition is to artificial intelligence roughly what 'hello world' is to programming: a standard first program. Many TensorFlow techniques can be demonstrated by extending it, but since spring drowsiness has gotten the better of me lately, I will keep this brief. To prepare the data you can use the input_data.py file that Google already provides; its source code is included below.

    # Copyright 2015 Google Inc. All Rights Reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    # ==============================================================================
    """Functions for downloading and reading MNIST data."""
    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function
    import gzip
    import os
    import tensorflow.python.platform
    import numpy
    from six.moves import urllib
    from six.moves import xrange  # pylint: disable=redefined-builtin
    import tensorflow as tf
    SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
    def maybe_download(filename, work_directory):
      """Download the data from Yann's website, unless it's already here."""
      if not os.path.exists(work_directory):
        os.mkdir(work_directory)
      filepath = os.path.join(work_directory, filename)
      if not os.path.exists(filepath):
        filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
      return filepath
    def _read32(bytestream):
      dt = numpy.dtype(numpy.uint32).newbyteorder('>')
      return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
    def extract_images(filename):
      """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
      print('Extracting', filename)
      with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
          raise ValueError(
              'Invalid magic number %d in MNIST image file: %s' %
              (magic, filename))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
    def dense_to_one_hot(labels_dense, num_classes=10):
      """Convert class labels from scalars to one-hot vectors."""
      num_labels = labels_dense.shape[0]
      index_offset = numpy.arange(num_labels) * num_classes
      labels_one_hot = numpy.zeros((num_labels, num_classes))
      labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
      return labels_one_hot
    def extract_labels(filename, one_hot=False):
      """Extract the labels into a 1D uint8 numpy array [index]."""
      print('Extracting', filename)
      with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
          raise ValueError(
              'Invalid magic number %d in MNIST label file: %s' %
              (magic, filename))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
          return dense_to_one_hot(labels)
        return labels
    class DataSet(object):
      def __init__(self, images, labels, fake_data=False, one_hot=False,
                   dtype=tf.float32):
        """Construct a DataSet.
        one_hot arg is used only if fake_data is true.  `dtype` can be either
        `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
        `[0, 1]`.
        """
        dtype = tf.as_dtype(dtype).base_dtype
        if dtype not in (tf.uint8, tf.float32):
          raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                          dtype)
        if fake_data:
          self._num_examples = 10000
          self.one_hot = one_hot
        else:
          assert images.shape[0] == labels.shape[0], (
              'images.shape: %s labels.shape: %s' % (images.shape,
                                                     labels.shape))
          self._num_examples = images.shape[0]
          # Convert shape from [num examples, rows, columns, depth]
          # to [num examples, rows*columns] (assuming depth == 1)
          assert images.shape[3] == 1
          images = images.reshape(images.shape[0],
                                  images.shape[1] * images.shape[2])
          if dtype == tf.float32:
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
      @property
      def images(self):
        return self._images
      @property
      def labels(self):
        return self._labels
      @property
      def num_examples(self):
        return self._num_examples
      @property
      def epochs_completed(self):
        return self._epochs_completed
      def next_batch(self, batch_size, fake_data=False):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
          fake_image = [1] * 784
          if self.one_hot:
            fake_label = [1] + [0] * 9
          else:
            fake_label = 0
          return [fake_image for _ in xrange(batch_size)], [
              fake_label for _ in xrange(batch_size)]
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
          # Finished epoch
          self._epochs_completed += 1
          # Shuffle the data
          perm = numpy.arange(self._num_examples)
          numpy.random.shuffle(perm)
          self._images = self._images[perm]
          self._labels = self._labels[perm]
          # Start next epoch
          start = 0
          self._index_in_epoch = batch_size
          assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]
    def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=tf.float32):
      class DataSets(object):
        pass
      data_sets = DataSets()
      if fake_data:
        def fake():
          return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
        data_sets.train = fake()
        data_sets.validation = fake()
        data_sets.test = fake()
        return data_sets
      TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
      TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
      TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
      TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
      VALIDATION_SIZE = 5000
      local_file = maybe_download(TRAIN_IMAGES, train_dir)
      train_images = extract_images(local_file)
      local_file = maybe_download(TRAIN_LABELS, train_dir)
      train_labels = extract_labels(local_file, one_hot=one_hot)
      local_file = maybe_download(TEST_IMAGES, train_dir)
      test_images = extract_images(local_file)
      local_file = maybe_download(TEST_LABELS, train_dir)
      test_labels = extract_labels(local_file, one_hot=one_hot)
      validation_images = train_images[:VALIDATION_SIZE]
      validation_labels = train_labels[:VALIDATION_SIZE]
      train_images = train_images[VALIDATION_SIZE:]
      train_labels = train_labels[VALIDATION_SIZE:]
      data_sets.train = DataSet(train_images, train_labels, dtype=dtype)
      data_sets.validation = DataSet(validation_images, validation_labels,
                                     dtype=dtype)
      data_sets.test = DataSet(test_images, test_labels, dtype=dtype)
      return data_sets

      After downloading and saving it, place input_data.py in your project directory, create a new project file, and the following two lines of code complete the entire MNIST data preparation.

    import input_data
    
    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

      This automatically downloads the data files into 'MNIST_data/' under the project directory (skipping the download if the files are already there) and stores the train, validation, and test sets in the mnist variable.
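
      As a quick sanity check (my own addition, not from the original post), you can inspect the shapes of the three sets; read_data_sets holds out the first 5000 training images as the validation set:

    import input_data

    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
    print(mnist.train.images.shape)       # (55000, 784): 60000 training images minus the 5000 held out
    print(mnist.validation.images.shape)  # (5000, 784)
    print(mnist.test.images.shape)        # (10000, 784)
    print(mnist.train.labels.shape)       # (55000, 10), because one_hot=True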

    Extending the Neural Network

      I will fill in this part gradually later. For now I use the simplest BP implementation; see my previous post for the details of BP.

    Loss Function

      Both how well a neural network model performs and what the optimization is aiming for are defined through the loss function, and different optimization goals call for different loss functions. For classification problems, cross-entropy is a standard measure of how close the output vector is to the expected one.

      (To be filled in later; until then, a small illustrative sketch follows.)
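
      As a minimal, self-contained sketch (my own illustration, not code from the original post), this is how cross-entropy is typically computed in TensorFlow 1.x. The key point is that the op applies softmax itself, so it must be fed raw logits rather than already-activated outputs:

    import tensorflow as tf

    # Raw, pre-softmax network outputs for one example with three classes.
    logits = tf.constant([[2.0, 0.5, -1.0]])
    # One-hot target: the true class is class 0.
    labels = tf.constant([[1.0, 0.0, 0.0]])

    # softmax_cross_entropy_with_logits_v2 (available since TF 1.5) applies
    # softmax internally, so the inputs must NOT be softmax-ed beforehand.
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))

    with tf.Session() as sess:
        print(sess.run(loss))  # roughly 0.24: the true class already has the largest logit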

    Optimization Algorithms

      (To be filled in later.)

    Overfitting

      (To be filled in later.)

    Moving Average Model

      (To be filled in later.)

    Saving the Model

      (To be filled in later.)

    Assignment

      Write a handwritten digit recognition program and push the recognition accuracy as high as possible.

    #-*- coding:utf-8 -*-
    #The MNIST database of handwritten digits
    #Author:Kai Z

    import tensorflow as tf
    import numpy as np
    import input_data

    # Load the MNIST data into MNIST_data/ under the project directory
    # (provides mnist.train, mnist.validation and mnist.test).
    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

    # Network hyperparameters
    input_node = 784
    output_node = 10
    hide_node = 100
    batch_size = 100
    learning_rate = 1e-3
    training_steps = 5000


    x = tf.placeholder(tf.float32, [None, input_node])
    y = tf.placeholder(tf.float32, [None, output_node])

    hidden_weight = tf.Variable(tf.random_normal([input_node, hide_node], stddev=1, seed=1))
    hidden_bias = tf.Variable(tf.zeros([1, hide_node], tf.float32))
    output_weight = tf.Variable(tf.random_normal([hide_node, output_node], stddev=1, seed=1))
    output_bias = tf.Variable(tf.zeros([1, output_node], tf.float32))

    h = tf.nn.tanh(tf.matmul(x, hidden_weight) + hidden_bias)
    # Keep the raw (pre-activation) outputs: sparse_softmax_cross_entropy_with_logits
    # applies softmax itself, so it must be fed unscaled logits, not sigmoid outputs.
    logits = tf.matmul(h, output_weight) + output_bias

    correct_predict = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=tf.argmax(y, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_mean)
    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)

        for i in range(training_steps):
            input_batch, output_batch = mnist.train.next_batch(batch_size)
            sess.run(train_op, feed_dict={x: input_batch, y: output_batch})

            if i % 100 == 0:
                right_rate = sess.run(accuracy, feed_dict={x: mnist.validation.images, y: mnist.validation.labels})
                print('After %d training steps, validation accuracy is %f%%' % (i, right_rate * 100))
        right_rate = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print('After %d training steps, test accuracy is %f%%' % (i, right_rate * 100))

      In the end, the test accuracy reached 91%. There is still room for improvement.
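
      One concrete place to look for that improvement (my own suggestion, not part of the original assignment): the weights above are initialized from stddev=1 Gaussians over a 784-dimensional input, which drives tanh deep into saturation at the start of training. A sketch of a smaller initial scale, as a drop-in replacement for the two weight definitions in the program above:

    # A possible tweak (sketch only): smaller initial weights keep tanh
    # out of saturation on 784-dimensional inputs.
    hidden_weight = tf.Variable(
        tf.truncated_normal([input_node, hide_node], stddev=0.1, seed=1))
    output_weight = tf.Variable(
        tf.truncated_normal([hide_node, output_node], stddev=0.1, seed=1))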
