  • 002 - Simple Keras applications

    # Keras offers a convenient interface for feeding in image data: the
    # keras.preprocessing.image.ImageDataGenerator class
    # This class creates a generator object that yields, batch by batch, the
    # multidimensional arrays holding the image data
    # Depending on the backend (TensorFlow or Theano), the meaning of the
    # array dimensions differs: two dimensions hold the image's pixels and a
    # third holds the colour channels, so a grayscale image has a single
    # channel while an RGB image has three
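
    As a quick illustration of this generator interface (a minimal sketch; the data/train directory and the augmentation settings are hypothetical):

    from keras.preprocessing.image import ImageDataGenerator

    # Rescale pixels to [0, 1] and apply light augmentation
    datagen = ImageDataGenerator(rescale=1. / 255, rotation_range=10)

    # Yields batches of (images, labels) from class subfolders under data/train
    generator = datagen.flow_from_directory('data/train',
                                            target_size=(28, 28),
                                            color_mode='grayscale',
                                            batch_size=32)
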
    # The Sequential model
    # The Sequential model is a special case of the general Model; it is so
    # common that it is introduced separately here. Its layers are stacked in
    # strict linear order, and between layer k and layer k+1 you can insert
    # whatever elements are needed to build the network
    # These elements can be specified as a list and passed to Sequential to
    # generate the corresponding model


    from keras.models import Sequential # model container
    from keras.layers import Dense      # fully connected layer
    from keras.layers import Activation # activation layer
    

    Using TensorFlow backend.

    Building the model from a list of layers

    # Dense builds a fully connected layer; 32 is the number of neurons in it
    layers = [Dense(32, input_shape=(784,)),
              Activation('relu'),
              Dense(10),
              Activation('softmax')]
    model = Sequential(layers)
    model.summary()
    

      

    _________________________________________________________________
    Layer (type)                 Output Shape              Param #   
    =================================================================
    dense_1 (Dense)              (None, 32)                25120     
    _________________________________________________________________
    activation_1 (Activation)    (None, 32)                0         
    _________________________________________________________________
    dense_2 (Dense)              (None, 10)                330       
    _________________________________________________________________
    activation_2 (Activation)    (None, 10)                0         
    =================================================================
    Total params: 25,450
    Trainable params: 25,450
    Non-trainable params: 0
    _________________________________________________________________

    25120 = 784*32 + 32 (784 weights for each of the 32 neurons, plus 32 biases)

    330 = 32*10 + 10 (32 weights for each of the 10 neurons, plus 10 biases)

    25,450 = 25120 + 330
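
    The hand arithmetic can be checked against Keras directly:

    # Keras's own parameter count should match the sums above
    assert model.count_params() == 784 * 32 + 32 + 32 * 10 + 10   # 25450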

    Building the same model with add()
    model = Sequential()
    model.add(Dense(32, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.summary()
    
    
    

      

    _________________________________________________________________
    Layer (type)                 Output Shape              Param #   
    =================================================================
    dense_3 (Dense)              (None, 32)                25120     
    _________________________________________________________________
    activation_3 (Activation)    (None, 32)                0         
    _________________________________________________________________
    dense_4 (Dense)              (None, 10)                330       
    _________________________________________________________________
    activation_4 (Activation)    (None, 10)                0         
    =================================================================
    Total params: 25,450
    Trainable params: 25,450
    Non-trainable params: 0
    _________________________________________________________________




    The general way to define a model:
    # The general (functional) Model
    # The general Model can describe networks of arbitrary, highly complex
    # topology, such as directed acyclic graphs
    # Like the Sequential model, the general Model is defined through a
    # functional API
    # A functional API has many advantages; for example, the result of a call
    # is determined solely by its arguments, which greatly reduces the effort
    # needed to test the code

    # With the general Model you start from the input tensor, define each
    # layer and its elements in turn, and finish with the output layer;
    # passing the input and output layers to Model then yields a model object
    
    from keras.layers import Input
    from keras.layers import Dense
    from keras.models import Model
    
    
    
    
    # Define the input layer
    inputs = Input(shape=(784,))
    # Define the hidden layers: two dense layers of 64 neurons each,
    # both using the relu activation, chained from the input
    x = Dense(64, activation='relu')(inputs)
    x = Dense(64, activation='relu')(x)
    # Define the output layer, taking the last hidden layer as its input
    y = Dense(10, activation='softmax')(x)

    # With all the pieces in place, the model object is defined by just two
    # arguments, the inputs and the outputs; everything in between is tracked
    # automatically
    model = Model(inputs=inputs, outputs=y)

    # Once the model object is defined it can be compiled and fitted to data;
    # fit() likewise takes two main arguments, the inputs and the targets
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(data, labels)
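
    For the fit() call above to run end to end, data and labels must already exist; a purely hypothetical stand-in using random arrays (defined before calling fit()) would look like this:

    import numpy as np
    from keras.utils import to_categorical

    # 1000 random "images" flattened to 784 features, with random digit labels
    data = np.random.random((1000, 784))
    labels = to_categorical(np.random.randint(10, size=(1000,)), num_classes=10)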
    

      

    A complete example

    import numpy as np
    import keras
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import Dropout
    from keras.layers import Flatten
    from keras.layers.convolutional import Conv2D
    from keras.layers.convolutional import MaxPooling2D
    import tarfile
    import os
    # The npz version of the dataset can't be uploaded here, so we use the
    # original TensorFlow dataset instead; it's the same data in a different
    # format, and Keras runs on TensorFlow underneath anyway
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("datalab/5611/", one_hot=True)
     
    X_train, y_train = mnist.train.images,mnist.train.labels
    X_test, y_test = mnist.test.images, mnist.test.labels
    X_train = X_train.reshape(-1, 28, 28,1).astype('float32')
    X_test = X_test.reshape(-1,28, 28,1).astype('float32')
    
    
    
    # # Read the data in directly (not possible here; see the note above)
    # (X_train, y_train), (X_test, y_test) = mnist.load_data("")
    # Take a look at the data
    print(X_train[0].shape)
    print(y_train[0])
    
    # Reshape the black-and-white training images into the standard 4-D tensor
    # form (samples, height, width, 1) and convert the pixels to floats
    # (already done above right after loading)
    # X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32')
    # X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32')
    
    # Pixel values range over 0-255, so divide by 255 to scale them into [0, 1]
    # (note: the TensorFlow reader above already returns floats in [0, 1], so
    # this division effectively rescales the data a second time)
    X_train /= 255
    X_test /= 255
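
    A quick sanity check on the scaling (since the TensorFlow reader already
    returns values in [0, 1], after this division the maximum will be well
    below 1):

    print(X_train.min(), X_train.max())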
    
    
    # # The output layer has 10 nodes, so the target digits 0-9 are best
    # # represented in one-hot form
    # def tran_y(y):
    #     y_ohe = np.zeros(10)
    #     y_ohe[y] = 1
    #     return y_ohe


    # # Re-encode the labels as one-hot vectors
    # # (unnecessary here: read_data_sets(..., one_hot=True) already did this)
    # y_train_ohe = np.array([tran_y(y_train[i]) for i in range(len(y_train))])
    # y_test_ohe = np.array([tran_y(y_test[i]) for i in range(len(y_test))])
    
    # Build the convolutional network
    model = Sequential()
    # Add a convolutional layer: 32 filters, each covering a 5*5*1 window,
    # stride 1, a ring of zero padding around the image ('same'), and a relu
    # non-linearity
    model.add(Conv2D(filters=32, kernel_size=(5, 5), strides=(1, 1), padding='same',
                     input_shape=(28, 28, 1), activation='relu'))
    # Add a max-pooling layer
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Add a Dropout layer with a dropout rate of 0.2
    model.add(Dropout(0.2))
    
    # Repeat the pattern to deepen the network
    model.add(Conv2D(64, kernel_size=(5, 5), strides=(1, 1), padding='same',
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    
    # Flatten the feature maps: with the default 'valid' pooling the spatial
    # size shrinks 28 -> 14 -> 7 -> 3, so this yields 3*3*32 = 288 features
    model.add(Flatten())
    
    # Stack fully connected layers on top
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(10, activation='softmax'))
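
    At this point model.summary() can be called to confirm the layer shapes,
    including the 288-feature Flatten output noted above:

    model.summary()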
    
    
    adam = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-5, amsgrad=False)
    
    # Define the loss; for classification problems cross-entropy is the usual choice
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    
    # Train on mini-batches of samples
    model.fit(X_train, y_train, validation_data=(X_test, y_test),epochs=20, batch_size=128)
    
    # Evaluate the model's accuracy on the test set
    # verbose: progress display mode; 0 shows nothing, 1 shows a progress bar
    scores = model.evaluate(X_test, y_test, verbose=0)  # y_test is already one-hot
    print(scores)  # [test loss, test accuracy]
    
    
    

      

    Extracting datalab/5611/train-images-idx3-ubyte.gz
    Extracting datalab/5611/train-labels-idx1-ubyte.gz
    Extracting datalab/5611/t10k-images-idx3-ubyte.gz
    Extracting datalab/5611/t10k-labels-idx1-ubyte.gz
    (28, 28, 1)
    [ 0.  0.  0.  0.  0.  0.  0.  1.  0.  0.]
    WARNING:tensorflow:Variable *= will be deprecated. Use variable.assign_mul if you want assignment to the variable value or 'x = x * y' if you want a new python Tensor object.
    Train on 55000 samples, validate on 10000 samples
    Epoch 1/20
    55000/55000 [==============================] - 142s 3ms/step - loss: 1.6362 - acc: 0.4422 - val_loss: 0.8287 - val_acc: 0.7440
    Epoch 2/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.7378 - acc: 0.7618 - val_loss: 0.5088 - val_acc: 0.8453
    Epoch 3/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.4881 - acc: 0.8455 - val_loss: 0.3199 - val_acc: 0.9081
    Epoch 4/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.3617 - acc: 0.8867 - val_loss: 0.2463 - val_acc: 0.9274
    Epoch 5/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.2882 - acc: 0.9096 - val_loss: 0.1939 - val_acc: 0.9417
    Epoch 6/20
    55000/55000 [==============================] - 142s 3ms/step - loss: 0.2451 - acc: 0.9224 - val_loss: 0.1646 - val_acc: 0.9483
    Epoch 7/20
    55000/55000 [==============================] - 142s 3ms/step - loss: 0.2134 - acc: 0.9326 - val_loss: 0.1425 - val_acc: 0.9553
    Epoch 8/20
    55000/55000 [==============================] - 142s 3ms/step - loss: 0.1862 - acc: 0.9413 - val_loss: 0.1200 - val_acc: 0.9618
    Epoch 9/20
    55000/55000 [==============================] - 142s 3ms/step - loss: 0.1694 - acc: 0.9458 - val_loss: 0.1126 - val_acc: 0.9633
    Epoch 10/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.1528 - acc: 0.9525 - val_loss: 0.1010 - val_acc: 0.9665
    Epoch 11/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.1421 - acc: 0.9554 - val_loss: 0.0922 - val_acc: 0.9689
    Epoch 12/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.1321 - acc: 0.9585 - val_loss: 0.0829 - val_acc: 0.9711
    Epoch 13/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.1241 - acc: 0.9606 - val_loss: 0.0807 - val_acc: 0.9713
    Epoch 14/20
    55000/55000 [==============================] - 142s 3ms/step - loss: 0.1176 - acc: 0.9630 - val_loss: 0.0751 - val_acc: 0.9749
    Epoch 15/20
    55000/55000 [==============================] - 142s 3ms/step - loss: 0.1106 - acc: 0.9646 - val_loss: 0.0668 - val_acc: 0.9780
    Epoch 16/20
    55000/55000 [==============================] - 142s 3ms/step - loss: 0.1046 - acc: 0.9670 - val_loss: 0.0637 - val_acc: 0.9787
    Epoch 17/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.1002 - acc: 0.9684 - val_loss: 0.0635 - val_acc: 0.9792
    Epoch 18/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.0940 - acc: 0.9696 - val_loss: 0.0606 - val_acc: 0.9781
    Epoch 19/20
    55000/55000 [==============================] - 142s 3ms/step - loss: 0.0886 - acc: 0.9717 - val_loss: 0.0557 - val_acc: 0.9818
    Epoch 20/20
    55000/55000 [==============================] - 141s 3ms/step - loss: 0.0871 - acc: 0.9732 - val_loss: 0.0541 - val_acc: 0.9818
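
    After training, individual predictions can be spot-checked against the true labels (a minimal sketch reusing the arrays defined above):

    # The predicted class is the index of the largest softmax output
    preds = model.predict(X_test[:5])
    print(preds.argmax(axis=1))        # predicted digits
    print(y_test[:5].argmax(axis=1))   # true digits (labels are one-hot)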




    Summary:
    Keras's appeal is that you never have to wire adjacent layers together by hand the way you do in raw TensorFlow; you only fill in the parameters each layer needs.

