  • Code from a Zhihu question

    # -*- coding: utf-8 -*-
    """
    Created on Sat May 19 18:44:40 2018
    
    @author: John Kwok
    """
    
    # import
    import numpy as np
    import tensorflow as tf
    import GetDataUtil
    # Data loading and preprocessing
    '''
    Hyperparameter definitions
    '''
    BATCH_SIZE = 128 # batch size
    EPOCH = 5 # number of training epochs
    HIDDEN_UNIT = 512 # hidden units
    KERNEL_SIZE = 3 # common convolution kernel size
    COVN_1_CHANNELS = 128 # number of output channels of the first conv layer
    COVN_2_CHANNELS = 64 # number of output channels of the second conv layer
    INPUT_CHENNELS = 1 # number of channels of the input image
    SCNN_KERNEL_LENGTH = 9 # width of the SCNN convolution kernel
    PAD = SCNN_KERNEL_LENGTH - 1 # total amount of padding needed after slicing
    # Padding applied after slicing (NHWC: pads the width dimension only)
    PADDING = [[0, 0],
               [0, 0],
               [int(PAD / 2), PAD - int(PAD / 2)],
               [0, 0]]
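    # NOTE: SCNN_KERNEL_LENGTH, PAD and PADDING are prepared for the SCNN
    # slice-and-pad step, but they are not referenced again in this snippet.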
    
    '''
    Data loading
    '''
    #X_train_origin, X_test_origin, y_train, y_test = GetDataUtil.getTrainTestSet(dataPath = "../DataSet_NPSave/JustifiedData.npy",test_size = 0.1)
    
    X_train_origin, X_test_origin, y_train, y_test = GetDataUtil.getTrainTestSet(dataPath = "../DataSet_NPSave/RandomCrop_NPAWF_Noise_orgin_ACC_005_10000.npy",test_size = 0.1)
    
    '''2. Data preprocessing'''
    def DataPreprocess(data):
        print("Data preprocessing, please wait...")
        # Standardize the accelerometer (rows 0-2) and gyroscope (rows 3-5)
        # channels separately
        data[:,:3,:] = (data[:,:3,:] - np.mean(data[:,:3,:]))/np.std(data[:,:3,:])
        data[:,3:6,:] = (data[:,3:6,:] - np.mean(data[:,3:6,:]))/np.std(data[:,3:6,:])
        
        # Feature construction: trig/polynomial transforms and cross terms
        sin = np.sin(data * np.pi / 2)
        cos = np.cos(data * np.pi / 2)
        X_2 = np.power(data,2)
        X_3 = np.power(data,3)   
        ACC_All = np.sqrt((np.power(data[:,0,:],2)+
                          np.power(data[:,1,:],2)+
                          np.power(data[:,2,:],2))/3)[:,np.newaxis,:]    
        Ay_Gz = (data[:,1,:] * data[:,5,:])[:,np.newaxis,:]
        Ay_2_Gz = (np.power(data[:,1,:],2) * data[:,5,:])[:,np.newaxis,:]
        Ay_Gz_2 = (np.power(data[:,5,:],2) * data[:,1,:])[:,np.newaxis,:]
        Ax_Gy = (data[:,0,:] * data[:,4,:])[:,np.newaxis,:]
        Ax_2_Gy = (np.power(data[:,0,:],2) * data[:,4,:])[:,np.newaxis,:]
        Ax_Gy_2 = (np.power(data[:,4,:],2) * data[:,0,:])[:,np.newaxis,:]
        
        Ax_Ay_Az = (data[:,0,:]*data[:,1,:]*data[:,2,:])[:,np.newaxis,:]
        
        newData = np.concatenate((data,sin,cos,X_3,X_2,ACC_All,Ay_Gz,Ay_2_Gz,Ay_Gz_2,Ax_Gy,
                               Ax_2_Gy,Ax_Gy_2,Ax_Ay_Az),axis = 1)
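        # Resulting channels: 6 base + 6*4 transforms (sin, cos, x^3, x^2) + 8 cross terms = 38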
        
        # data *= 255
        # Debug output: value range of the standardized base channels
        print(np.min(data))
        print(np.max(data))
        
        print("Finished!")
        return newData
    X_train = DataPreprocess(X_train_origin)
    X_test = DataPreprocess(X_test_origin)

    # Add a trailing channel axis: (N, 38, 300) -> NHWC (N, 38, 300, 1)
    data = X_train[:,:,:,np.newaxis]
    label = y_train
    #data = np.random.randn(1000,38,300,1) # NHWC
    #label = np.random.randint(5,size = 1000)
    print(data.shape)
    print(label.shape)
    
    '''
    Declare the trainable parameters
    '''
    # regularizers: 0.01 is also worth trying
    weights = {
        'w_conv1':tf.get_variable(name = 'w_conv1',
                             shape = [2,
                                      2,
                                      INPUT_CHENNELS,
                                      COVN_1_CHANNELS],
                             # the first positional argument of
                             # truncated_normal_initializer is the mean,
                             # so the stddev must be passed by keyword
                             initializer = tf.truncated_normal_initializer(stddev = 0.0001),
                             regularizer = tf.keras.regularizers.l2(l=0.1)),
        'w_conv2':tf.get_variable(name = 'w_conv2',
                             shape = [KERNEL_SIZE,
                                      KERNEL_SIZE,
                                      COVN_1_CHANNELS,
                                      COVN_2_CHANNELS],
                             initializer = tf.truncated_normal_initializer(stddev = 0.001),
                             regularizer = tf.keras.regularizers.l2(l=0.1))
    }
    
    biases = {
        'b_conv1': tf.get_variable(name = 'b_conv1',
                                  shape= [COVN_1_CHANNELS],
                                  initializer = tf.zeros_initializer(),
                                  regularizer = tf.keras.regularizers.l2(l=0.01)),
        'b_conv2': tf.Variable(tf.zeros(COVN_2_CHANNELS),
                                   name = "b_conv2")                     
    }
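    # Note: b_conv1 carries an L2 regularizer while b_conv2 does not;
    # regularizing bias terms is uncommon in practice.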
    
    tf.summary.histogram('w_conv1',weights['w_conv1'])
    tf.summary.histogram('w_conv2',weights['w_conv2'])
    
    tf.summary.histogram('b_conv1',biases['b_conv1'])
    tf.summary.histogram('b_conv2',biases['b_conv2'])
    
    # Network definition
    x = tf.placeholder(tf.float32,shape = (None,data.shape[1],data.shape[2],data.shape[3]))
    y = tf.placeholder(tf.int64,shape = (None,1))
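    # y has shape (None, 1), so tf.one_hot yields (None, 1, 5); the reshape flattens it to (None, 5)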
    onehot_labels = tf.reshape(tf.one_hot(y,depth = 5),shape=(-1,5))
    
    # First convolutional layer
    with tf.name_scope('covn_1'):
         conv_out_1 = tf.nn.conv2d(input = x,
                                filter = weights['w_conv1'],
                                strides = [1, 1, 1, 1],
                                padding = "SAME",
                                use_cudnn_on_gpu = True,
                                data_format = 'NHWC',
                                dilations = [1, 1, 1, 1],
                                name = 'conv_out')
         relu_out_1 = tf.nn.relu(tf.nn.bias_add(conv_out_1,biases['b_conv1']),
                                                name = 'relu_out')
         pooling_out_1 = tf.nn.max_pool(relu_out_1,
                                      ksize = [1,3,3,1],
                                      strides = [1,1,1,1],
                                      padding = "SAME",
                                      data_format='NHWC',
                                      name = 'pooling_out')
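         # Note: max pooling with stride 1 and SAME padding keeps the spatial
         # size unchanged; it smooths the feature map but does not downsample.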
    
    # Second convolutional layer
    with tf.name_scope('covn_2'):
         conv_out_2 = tf.nn.conv2d(input = pooling_out_1,
                                filter = weights['w_conv2'],
                                strides = [1, 1, 1, 1],
                                padding = "SAME",
                                use_cudnn_on_gpu = True,
                                data_format = 'NHWC',
                                dilations = [1, 1, 1, 1],
                                name = 'conv_out')
         relu_out_2 = tf.nn.relu(tf.nn.bias_add(conv_out_2,biases['b_conv2']),
                                                name = 'relu_out')     
         pooling_out_2 = tf.nn.max_pool(relu_out_2,
                                      ksize = [1,3,3,1],
                                      strides = [1,1,1,1],
                                      padding = "SAME",
                                      data_format='NHWC',
                                      name = 'pooling_out')
    
    with tf.name_scope('output'):
    
         fc_input = tf.layers.flatten(pooling_out_2,name='flatten')
         fc1_out = tf.layers.dense(fc_input,
                                  1024,
                                  activation=None,
                                  use_bias=True,
                                  kernel_initializer=tf.truncated_normal_initializer(stddev = 0.01),
                                  bias_initializer=tf.zeros_initializer(),
                                  kernel_regularizer=tf.keras.regularizers.l2(l=0.01),
                                  bias_regularizer=tf.keras.regularizers.l2(l=0.01),
                                  name='fc_1')
         fc2_out = tf.layers.dense(fc1_out,
                                  512,
                                  activation=None,
                                  use_bias=True,
                                  kernel_initializer=tf.truncated_normal_initializer(stddev = 0.01),
                                  bias_initializer=tf.zeros_initializer(),
                                  kernel_regularizer=tf.keras.regularizers.l2(l=0.01),
                                  bias_regularizer=tf.keras.regularizers.l2(l=0.01),
                                  name='fc_2')
         fc3_out = tf.layers.dense(fc2_out,
                                  256,
                                  activation=None,
                                  use_bias=True,
                                  kernel_initializer=tf.truncated_normal_initializer(stddev = 0.01),
                                  bias_initializer=tf.zeros_initializer(),
                                  kernel_regularizer=tf.keras.regularizers.l2(l=0.01),
                                  bias_regularizer=tf.keras.regularizers.l2(l=0.01),
                                  name='fc_3')
         logits = tf.layers.dense(fc3_out,
                                  5,
                                  activation=None,
                                  use_bias=True,
                                  kernel_initializer=tf.truncated_normal_initializer(stddev = 0.01),
                                  bias_initializer=tf.zeros_initializer(),
                                  kernel_regularizer=tf.keras.regularizers.l2(l=0.01),
                                  bias_regularizer=tf.keras.regularizers.l2(l=0.01),
                                  name='fc_output')
    
    # Cross-entropy plus the L2 terms registered above; without
    # tf.losses.get_regularization_loss() those regularizers would have no effect
    cross_entropy = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(labels = onehot_labels,
                                                                             logits = logits))
    loss = tf.add(cross_entropy, tf.losses.get_regularization_loss(), name = 'loss')
    optimizer = tf.train.AdamOptimizer(learning_rate = 0.001,name = 'adam')
    train_op = optimizer.minimize(loss,name = 'train_op')
    
    correct_pred = tf.equal(tf.argmax(logits,1),tf.argmax(onehot_labels,1),name = 'correct_pred')
    c = tf.cast(correct_pred,tf.float32)
    accuracy = tf.reduce_mean(c,name = 'accuracy')
    
    init_op = tf.global_variables_initializer()
         
    tf.summary.scalar('loss',loss)
    tf.summary.scalar('accuracy',accuracy)
    
    from sklearn.utils import shuffle
    with tf.Session() as sess:
         writer = tf.summary.FileWriter('./log/scnn/')
         writer.add_graph(sess.graph)
         merge_all = tf.summary.merge_all()
         sess.run(init_op)
         sess.graph.finalize()
         step = 0
         for epoch in range(EPOCH):
              X,Label = shuffle(data,label,random_state=None)
              idx = 0
              while idx < X.shape[0]:
                   if(idx+BATCH_SIZE>X.shape[0]):
                        x_batch = X[idx:]
                        y_batch = Label[idx:].reshape(-1,1)
                   else:
                        x_batch = X[idx:idx+BATCH_SIZE]
                        y_batch = Label[idx:idx+BATCH_SIZE].reshape(-1,1)
                   print("Step:"+str(step))
                   pre, _, log = sess.run([accuracy,train_op,merge_all],feed_dict = {x:x_batch,y:y_batch})
                   print(pre)
                   writer.add_summary(log,step)
                   idx += BATCH_SIZE
                   step += 1
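              # Report metrics on the last minibatch of the epoch only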
              l,acc = sess.run([loss,accuracy],feed_dict = {x:x_batch,y:y_batch})

              print("Epoch " + str(epoch+1) + ", Minibatch Loss= " +
                    "{:.4f}".format(l) + ", Training Accuracy= " +
                    "{:.3f}".format(acc))
         print("Optimization Finished!")
         
  • Original article: https://www.cnblogs.com/guoyaohua/p/9127374.html