  • Convolutional Neural Network (CNN) with TensorFlow 2.1

    Oddly enough, without changing any parameters, the test accuracy went from roughly 98% to 99.1% between runs.
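
    Run-to-run differences like this usually come from random weight initialization and the random batch sampling below rather than anything mystical. A minimal sketch (an assumption on my part; GPU kernels can still be nondeterministic) for making runs repeatable:

    import numpy as np
    import tensorflow as tf
    np.random.seed(42)      # fix the NumPy RNG used by MNISTLoader.get_batch
    tf.random.set_seed(42)  # fix the initialization of the Keras layers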

    Reference: 《简单粗暴TensorFlow》 (A Concise Handbook of TensorFlow); highly recommended.

    import tensorflow as tf
    import numpy as np
    
    class MNISTLoader():
        def __init__(self):
            mnist = tf.keras.datasets.mnist
            (self.train_data, self.train_label), (self.test_data, self.test_label) = mnist.load_data()
            # MNIST images are uint8 (integers 0-255) by default. The code below normalizes them to floats in [0, 1] and appends a trailing dimension as the color channel
            self.train_data = np.expand_dims(self.train_data.astype(np.float32) / 255.0, axis=-1)      # [60000, 28, 28, 1]
            self.test_data = np.expand_dims(self.test_data.astype(np.float32) / 255.0, axis=-1)        # [10000, 28, 28, 1]
            self.train_label = self.train_label.astype(np.int32)    # [60000]
            self.test_label = self.test_label.astype(np.int32)      # [10000]
            self.num_train_data, self.num_test_data = self.train_data.shape[0], self.test_data.shape[0]
    
        def get_batch(self, batch_size):
            # Randomly draw batch_size examples from the training set and return them
            index = np.random.randint(0, np.shape(self.train_data)[0], batch_size)
            return self.train_data[index, :], self.train_label[index]
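
    # A quick usage sketch: np.random.randint samples indices with replacement,
    # so a batch can occasionally contain duplicate images.
    # loader = MNISTLoader()
    # X, y = loader.get_batch(50)
    # print(X.shape, y.shape)  # (50, 28, 28, 1) (50,)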
    
    
    # tf.keras.layers.Conv2D(
    #     filters, kernel_size, strides=(1, 1), padding='valid', data_format=None,
    #     dilation_rate=(1, 1), activation=None, use_bias=True,
    #     kernel_initializer='glorot_uniform', bias_initializer='zeros',
    #     kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
    #     kernel_constraint=None, bias_constraint=None, **kwargs
    # )
    # filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).
    # kernel_size: the receptive field; an integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window.
    # strides: the stride of the window along the height and width.
    # padding: 'valid' discards window positions that do not fully fit the kernel; 'same' zero-pads the input so that (at stride 1) the output has the same spatial shape as the input.
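    # A quick sketch of the difference: with a 28x28 input, a 5x5 kernel, and stride 1,
    # 'same' keeps 28x28 while 'valid' shrinks it to 24x24 (28 - 5 + 1):
    # x = tf.zeros([1, 28, 28, 1])
    # print(tf.keras.layers.Conv2D(filters=8, kernel_size=5, padding='same')(x).shape)   # (1, 28, 28, 8)
    # print(tf.keras.layers.Conv2D(filters=8, kernel_size=5, padding='valid')(x).shape)  # (1, 24, 24, 8)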
    
    class CNN(tf.keras.Model):
        def __init__(self):
            super().__init__()
            self.conv1 = tf.keras.layers.Conv2D(
                filters=32,             # number of convolution kernels (output channels)
                kernel_size=[5, 5],     # kernel (receptive field) size
                padding='same',         # padding strategy ('valid' or 'same')
                activation=tf.nn.relu   # activation function
            )
            self.pool = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
            self.conv2 = tf.keras.layers.Conv2D(
                filters=64,
                kernel_size=[5, 5],
                padding='same',
                activation=tf.nn.relu
            )
            self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))
            self.dense1 = tf.keras.layers.Dense(units=1024, activation=tf.nn.relu)
            self.dense2 = tf.keras.layers.Dense(units=10)
    
        def call(self, inputs):
            x = self.conv1(inputs)                  # [batch_size, 28, 28, 32]
            x = self.pool(x)                        # [batch_size, 14, 14, 32]
            x = self.conv2(x)                       # [batch_size, 14, 14, 64]
            x = self.pool(x)                        # [batch_size, 7, 7, 64]
            x = self.flatten(x)                     # [batch_size, 7 * 7 * 64]
            x = self.dense1(x)                      # [batch_size, 1024]
            x = self.dense2(x)                      # [batch_size, 10]
            output = tf.nn.softmax(x)
            return output
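
    # Shape sanity check (a sketch): a dummy forward pass confirms the shape
    # comments in call() above; each output row is 10 softmax probabilities.
    # print(CNN()(tf.zeros([1, 28, 28, 1])).shape)  # (1, 10)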
    
    num_epochs = 5
    batch_size = 50
    learning_rate = 0.001
    
    model = CNN()
    data_loader = MNISTLoader()
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    
    num_batches = int(data_loader.num_train_data // batch_size * num_epochs)
    for batch_index in range(num_batches):
        X, y = data_loader.get_batch(batch_size)  # randomly sample a batch from the training set
        with tf.GradientTape() as tape:
            y_pred = model(X)  # forward pass: predicted class probabilities
            loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y, y_pred=y_pred)  # per-example cross-entropy loss
            loss = tf.reduce_mean(loss)
            print("batch %d: loss %f" % (batch_index, loss.numpy()))
        grads = tape.gradient(loss, model.variables)  # compute gradients of the loss w.r.t. the model variables
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))  # apply the gradient update to the variables
    
    # tf.keras.metrics.SparseCategoricalAccuracy is a metric (evaluator) that accumulates results across update_state() calls
    # Why check predictions batch by batch? It simply bounds memory use; the accumulated result is the same as a single full-set evaluation (see the sketch after this block)
    sparse_categorical_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
    num_batches = int(data_loader.num_test_data // batch_size)
    for batch_index in range(num_batches):
        start_index, end_index = batch_index * batch_size, (batch_index + 1) * batch_size
        # model.predict takes the test data and returns the predicted probabilities
        y_pred = model.predict(data_loader.test_data[start_index: end_index])
        sparse_categorical_accuracy.update_state(y_true=data_loader.test_label[start_index: end_index], y_pred=y_pred)
    print("test accuracy: %f" % sparse_categorical_accuracy.result())
  • Original post: https://www.cnblogs.com/lalalatianlalu/p/12499618.html