  • Simple CNN test example

    1. Training:

    import tensorflow as tf
    import cv2
    import os
    import numpy as np
    import time
    import matplotlib.pyplot as plt
    from sklearn.metrics import confusion_matrix, classification_report
    
    # Dropout keep probability: tf.nn.dropout in TF 1.x takes the probability of
    # *keeping* a unit, so 0.6 here drops roughly 40% of activations.
    keep_prob = 0.6
    input_imgs = tf.placeholder(dtype=tf.float32,shape=[None,128,64,3],name='input_imgs')
    input_label = tf.placeholder(dtype=tf.float32,shape=[None,2],name='input_label')
    
    # Initialize weights (convolution kernels)
    def weight_init(shape):
        weight = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
        return tf.Variable(weight)
    
    
    # Initialize biases
    def bias_init(shape):
        bias = tf.random_normal(shape, dtype=tf.float32)
        return tf.Variable(bias)
    
    # Fully connected layer weights (Xavier-style uniform initialization)
    def fch_init(layer1, layer2, const=1):
        min_val = -const * (6.0 / (layer1 + layer2))
        max_val = -min_val
        weight = tf.random_uniform([layer1, layer2], minval=min_val, maxval=max_val, dtype=tf.float32)
        return tf.Variable(weight)
    
    
    # Convolution layer (stride 1, SAME padding)
    def conv2d(images, weight):
        return tf.nn.conv2d(images, weight, strides=[1, 1, 1, 1], padding='SAME')
    
    
    # 2x2 max pooling layer (stride 2)
    def max_pool2x2(images, tname):
        return tf.nn.max_pool(images, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=tname)
    
    # First conv layer: 16 kernels of size 3*3*3
    w1 = weight_init([3, 3, 3, 16])
    b1 = bias_init([16])
    # Output is in NHWC layout
    conv_1 = conv2d(input_imgs, w1) + b1
    relu_1 = tf.nn.relu(conv_1, name='relu_1')
    max_pool_1 = max_pool2x2(relu_1, 'max_pool_1')
    
    # Second conv layer: 32 kernels of size 3*3*16
    w2 = weight_init([3, 3, 16, 32])
    b2 = bias_init([32])
    conv_2 = conv2d(max_pool_1, w2) + b2
    # Activation
    relu_2 = tf.nn.relu(conv_2, name='relu_2')
    # Subsampling (max pooling)
    max_pool_2 = max_pool2x2(relu_2, 'max_pool_2')
    
    # Third conv layer: 64 kernels of size 3*3*32
    w3 = weight_init([3, 3, 32, 64])
    b3 = bias_init([64])
    conv_3 = conv2d(max_pool_2, w3) + b3
    relu_3 = tf.nn.relu(conv_3, name='relu_3')
    max_pool_3 = max_pool2x2(relu_3, 'max_pool_3')
    
    # Flatten: 16 * 8 * 64 = 8192 features per image
    flat_dim = int(max_pool_3.shape[1] * max_pool_3.shape[2] * max_pool_3.shape[3])
    print(max_pool_3.shape, '------- flatten -------', flat_dim)
    f_input = tf.reshape(max_pool_3, [-1, flat_dim])
    print('f_input.shape:', f_input.shape)
    
    # Fully connected layer 1: 16*8*64 = 8192 -> 512
    f_w1 = fch_init(8192, 512)
    f_b1 = bias_init([512])
    f_r1 = tf.matmul(f_input, f_w1) + f_b1
    f_relu_r1 = tf.nn.relu(f_r1)
    # Drop part of the activations to reduce overfitting
    f_dropout_r1 = tf.nn.dropout(f_relu_r1, keep_prob)
    print('f_dropout_r1.shape:', f_dropout_r1.shape)

    # Fully connected layer 2: 512 -> 128
    f_w2 = fch_init(512, 128)
    f_b2 = bias_init([128])
    f_r2 = tf.matmul(f_dropout_r1, f_w2) + f_b2
    f_relu_r2 = tf.nn.relu(f_r2)
    f_dropout_r2 = tf.nn.dropout(f_relu_r2, keep_prob)

    # Fully connected layer 3: 128 -> 2
    f_w3 = fch_init(128, 2)
    f_b3 = bias_init([2])
    f_r3 = tf.matmul(f_dropout_r2, f_w3) + f_b3
    f_softmax = tf.nn.softmax(f_r3, name='f_softmax')

    # Cross-entropy loss
    cross_entry = tf.reduce_mean(tf.reduce_sum(-input_label * tf.log(f_softmax)))
    optimizer = tf.train.AdamOptimizer(0.0001).minimize(cross_entry)

    # Accuracy
    arg1 = tf.argmax(input_label, 1)
    arg2 = tf.argmax(f_softmax, 1)
    cos = tf.equal(arg1, arg2)
    acc = tf.reduce_mean(tf.cast(cos, dtype=tf.float32))

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    train_img = []
    train_labels = []
    test_img = []
    test_labels = []
    images = []
    labels = []

    # Positive samples are labeled [1, 0], negative samples [0, 1]
    for fileName in os.listdir('../img/pos'):
        images.append([os.path.join('../img/pos', fileName)])
        labels.append([1, 0])
    for fileName in os.listdir('../img/neg'):
        images.append([os.path.join('../img/neg', fileName)])
        labels.append([0, 1])

    images = np.array(images)
    labels = np.array(labels)

    # Shuffle images and labels with the same permutation
    permutation = np.random.permutation(labels.shape[0])
    images = images[permutation, :]
    labels = labels[permutation, :]
    # Get a mini-batch of training data (or test data when isTrain=False)
    def get_train_data(batch, isTrain=True):
        global test_labels, test_img
        if isTrain:
            train_num = int(labels.shape[0] * 0.8)
            train_img = images[:train_num, :]
            train_labels = labels[:train_num, :]
            test_img = images[train_num:, :]
            test_labels = labels[train_num:, :]
            return train_img[batch*20:(batch+1)*20], train_labels[batch*20:(batch+1)*20]
        else:
            return test_img[batch*20:(batch+1)*20], test_labels[batch*20:(batch+1)*20]

    # Read one batch of 20 images from disk
    def read_img(train_img):
        imgs = []
        for i in range(20):
            img = cv2.imread(train_img[i][0])
            imgs.append(img)
        imgs = np.array(imgs)
        return imgs

    Cost = []
    Accuracy = []
    start_time = time.time()
    for i in range(100):
        train_img, train_labels = get_train_data(i)
        imgs = read_img(train_img)
        result, acc1, cross_entry_r, cos1, f_softmax1, relu_1_r = sess.run(
            [optimizer, acc, cross_entry, cos, f_softmax, relu_1],
            feed_dict={input_imgs: imgs, input_label: train_labels})
        print("epoch: {}, accuracy: {}, cross_loss: {}".format(i, acc1, cross_entry_r))
        Cost.append(cross_entry_r)
        Accuracy.append(acc1)
    print('total time: %d' % (time.time() - start_time))

    # Loss curve
    fig1, ax1 = plt.subplots(figsize=(10, 7))
    plt.plot(Cost)
    ax1.set_xlabel('Epochs')
    ax1.set_ylabel('Cost')
    plt.title('Cross Loss')
    plt.grid()
    plt.show()

    # Accuracy curve
    fig7, ax7 = plt.subplots(figsize=(10, 7))
    plt.plot(Accuracy)
    ax7.set_xlabel('Epochs')
    ax7.set_ylabel('Accuracy Rate')
    plt.title('Train Accuracy Rate')
    plt.grid()
    plt.show()

    # Evaluate on a held-out test batch
    test_img, test_labels = get_train_data(1, False)
    test_img = read_img(test_img)
    arg2_r = sess.run(arg2, feed_dict={input_imgs: test_img, input_label: test_labels})
    arg1_r = sess.run(arg1, feed_dict={input_imgs: test_img, input_label: test_labels})
    print(classification_report(arg1_r, arg2_r))

    # Save the model; global_step is appended to the checkpoint file name
    saver = tf.train.Saver()
    saver.save(sess, './model/my-gender-v1.0', global_step=123)

    2. Restore the saved model and visualize the learned features:

    import tensorflow as tf
    import numpy as np
    import cv2
    import matplotlib.pyplot as plt
    import os
    
    # Load a single test image
    img = cv2.imread('../img/pos/760.jpg')
    fig2,ax2 = plt.subplots(figsize=(2,2))
    ax2.imshow(img)
    plt.show()
    # Add the batch dimension: [1, 128, 64, 3]
    img = np.reshape(img, [1, 128, 64, 3])
    
    sess = tf.Session()
    graph_path=os.path.abspath('./model/my-gender-v1.0-123.meta')
    model=os.path.abspath('./model/')
    
    saver = tf.train.import_meta_graph(graph_path)
    saver.restore(sess, tf.train.latest_checkpoint(model))
    
    graph = tf.get_default_graph()
    
    # Build the feed_dict
    x = graph.get_tensor_by_name('input_imgs:0')
    y = graph.get_tensor_by_name('input_label:0')
    feed_dict={x:img,y:[[1,0]]}
    
    
    # First conv + pooling layer
    relu_1 = graph.get_tensor_by_name('relu_1:0')
    max_pool_1 = graph.get_tensor_by_name('max_pool_1:0')
    
    # Second conv + pooling layer
    relu_2 = graph.get_tensor_by_name('relu_2:0')
    max_pool_2 = graph.get_tensor_by_name('max_pool_2:0')
    
    # Third conv + pooling layer
    relu_3 = graph.get_tensor_by_name('relu_3:0')
    max_pool_3 = graph.get_tensor_by_name('max_pool_3:0')
    
    # Final fully connected (softmax) output
    f_softmax = graph.get_tensor_by_name('f_softmax:0')
    
    
    # ---------------------------- Per-layer feature visualization ----------------------------

    # conv1 features
    r1_relu = sess.run(relu_1,feed_dict)
    print('r1_relu',r1_relu.shape)
    # Transpose to [C, N, H, W] so each channel can be shown as its own image
    r1_transpose = sess.run(tf.transpose(r1_relu, [3, 0, 1, 2]))
    print('r1_transpose', r1_transpose.shape)
    fig, ax = plt.subplots(nrows=1, ncols=16, figsize=(16, 1))
    for i in range(16):
        ax[i].imshow(r1_transpose[i][0])
    plt.title('Conv1 16*128*64')
    plt.show()
    
    # pool1 features
    max_pool_1 = sess.run(max_pool_1, feed_dict)
    r1_transpose = sess.run(tf.transpose(max_pool_1, [3, 0, 1, 2]))
    fig, ax = plt.subplots(nrows=1, ncols=16, figsize=(16, 1))
    for i in range(16):
        ax[i].imshow(r1_transpose[i][0])
    plt.title('Pool1 16*64*32')
    plt.show()
    
    
    # conv2 features
    r2_relu = sess.run(relu_2, feed_dict)
    r2_transpose = sess.run(tf.transpose(r2_relu, [3, 0, 1, 2]))
    fig, ax = plt.subplots(nrows=1, ncols=32, figsize=(32, 1))
    for i in range(32):
        ax[i].imshow(r2_transpose[i][0])
    plt.title('Conv2 32*64*32')
    plt.show()
    
    # pool2 features
    max_pool_2 = sess.run(max_pool_2, feed_dict)
    transpose = sess.run(tf.transpose(max_pool_2, [3, 0, 1, 2]))
    fig, ax = plt.subplots(nrows=1, ncols=32, figsize=(32, 1))
    for i in range(32):
        ax[i].imshow(transpose[i][0])
    plt.title('Pool2 32*32*16')
    plt.show()
    
    
    # conv3 features
    r3_relu = sess.run(relu_3, feed_dict)
    transpose = sess.run(tf.transpose(r3_relu, [3, 0, 1, 2]))
    fig, ax = plt.subplots(nrows=1, ncols=64, figsize=(32, 1))
    for i in range(64):
        ax[i].imshow(transpose[i][0])
    plt.title('Conv3 64*32*16')
    plt.show()
    
    # pool3 features
    max_pool_3 = sess.run(max_pool_3, feed_dict)
    transpose = sess.run(tf.transpose(max_pool_3, [3, 0, 1, 2]))
    fig, ax = plt.subplots(nrows=1, ncols=64, figsize=(32, 1))
    for i in range(64):
        ax[i].imshow(transpose[i][0])
    plt.title('Pool3 64*16*8')
    plt.show()
    
    # Class probabilities predicted for the test image
    print(sess.run(f_softmax, feed_dict))
    

      

    Note:

      The network follows the pattern conv2d -> relu -> max pool, stacked three times, with a softmax layer for binary classification at the end. When stacking convolutional layers this way, pay attention to the number of kernels per layer and to the kernel size and stride.
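
      As a quick sanity check on those choices, here is a minimal sketch (plain Python, no TensorFlow required; the helper name feature_map_shapes is just for illustration) that traces the 128*64*3 input through the three SAME-padded 3*3 convolutions and 2*2 / stride-2 poolings used above and prints each feature-map shape, ending at the 16*8*64 = 8192 features consumed by fch_init(8192, 512):

      import math

      # Trace an H x W input through the three conv/pool stages defined above:
      # a 3x3 'SAME' convolution with stride 1 keeps H and W, a 2x2 max pool with
      # stride 2 halves them (rounding up), and the channel count equals the
      # number of kernels in that layer.
      def feature_map_shapes(h=128, w=64, kernels=(16, 32, 64), pool=2):
          shapes = []
          for c in kernels:
              shapes.append(('conv', h, w, c))   # spatial size unchanged by SAME conv
              h, w = math.ceil(h / pool), math.ceil(w / pool)
              shapes.append(('pool', h, w, c))   # halved by the 2x2 / stride-2 pool
          return shapes, h * w * kernels[-1]     # flattened size for the first FC layer

      shapes, flat = feature_map_shapes()
      for name, h, w, c in shapes:
          print('{}: {} x {} x {}'.format(name, h, w, c))
      print('flattened features:', flat)         # 16 * 8 * 64 = 8192

      If you change the number of pooling stages, the kernel counts, or the input size, re-running this gives the new flattened size to plug into the first fully connected layer.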

  • Original post: https://www.cnblogs.com/wbdream/p/10264836.html