  • Test code

    #coding:utf-8
    #http://blog.csdn.net/zhuiqiuk/article/details/53376283
    #http://blog.csdn.net/gan_player/article/details/77586489
    # from __future__ import absolute_import, unicode_literals
    # from tensorflow.examples.tutorials.mnist import input_data
    # import tensorflow as tf
    # import shutil
    # import os.path
    # from tensorflow.python.framework import graph_util
    #import mxnet as mx
    # import numpy as np
    # import random
    # import cv2
    # from time import sleep
    # from easydict import EasyDict as edict
    # import logging  
    # import pdb  
    # import math
    # import re
    import tensorflow as tf
    import numpy as np
    #import getmxnetmodel
    
    # def load_checkpoint():  
    #     """ 
    #     Load model checkpoint from file. 
    #     :param prefix: Prefix of model name. 
    #     :param epoch: Epoch number of model we would like to load. 
    #     :return: (arg_params, aux_params) 
    #     arg_params : dict of str to NDArray 
    #         Model parameter, dict of name to NDArray of net's weights. 
    #     aux_params : dict of str to NDArray 
    #         Model parameter, dict of name to NDArray of net's auxiliary states. 
    #     """  
    #     save_dict = mx.nd.load('model-0000.params')  
    #     arg_params = {}  
    #     aux_params = {}  
    #     for k, v in save_dict.items():  
    #         tp, name = k.split(':', 1)  
    #         if tp == 'arg':  
    #             arg_params[name] = v  
    #         if tp == 'aux':  
    #             aux_params[name] = v  
    #     return arg_params, aux_params  
    
    # def convert_context(params, ctx):  
    #     """ 
    #     :param params: dict of str to NDArray 
    #     :param ctx: the context to convert to 
    #     :return: dict of str of NDArray with context ctx 
    #     """  
    #     new_params = dict()  
    #     for k, v in params.items():  
    #         new_params[k] = v.as_in_context(ctx)  
    #     #print new_params[0]  
    #     return new_params  
      
    # def load_param(convert=False, ctx=None):  
    #     """ 
    #     wrapper for load checkpoint 
    #     :param prefix: Prefix of model name. 
    #     :param epoch: Epoch number of model we would like to load. 
    #     :param convert: reference model should be converted to GPU NDArray first 
    #     :param ctx: if convert then ctx must be designated. 
    #     :return: (arg_params, aux_params) 
    #     """  
    #     arg_params, aux_params = load_checkpoint()  
    #     if convert:  
    #         if ctx is None:  
    #             ctx = mx.cpu()  
    #         arg_params = convert_context(arg_params, ctx)  
    #         aux_params = convert_context(aux_params, ctx)  
    #     return arg_params, aux_params  
    
    def conv_weight_variable(params, name):
    #def conv_weight_variable():
        #conv_weight = params[0][name + "_weight"].asnumpy()
        #tf height * width * in_channels * output_channels
        #mxnet output_channels * in_channels * height * width
        height = 5
        width = 5
        inchannel = 1
        outchannel = 2
    
        #conv0 (64, 112, 112) kernel (3, 3) stride (1, 1) pad (1, 1)
        wkernel = 3
        stride = 1
        pad = 1
        dilate  = 1
    
        conv_weight = np.arange(wkernel * wkernel * inchannel * outchannel).reshape((outchannel,inchannel,wkernel,wkernel))
        conv_weight = conv_weight.transpose(3,2,1,0)
        print(conv_weight.shape)
        print('weight:', conv_weight)
        conv_weight = tf.Variable(conv_weight, dtype=np.float32, name = "weight")
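        # note (added): the canonical MXNet (O,I,H,W) -> TF (H,W,I,O) mapping is
        # transpose(2,3,1,0); the (3,2,1,0) used above additionally swaps H and W,
        # which stays consistent here because eval() below also transposes the
        # input data with (0,3,2,1) (NCHW -> NWHC) and transposes the conv
        # output back the same way.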
        return conv_weight
    
    # def bias_variable(shape, params):
    #     initial = tf.constant(0.1, shape=shape)
    #     return tf.Variable(initial)
    
    # def bn_variable(params, name):
    #     bn_beta = params[0][name + "_beta"].asnumpy() 
    #     bn_gamma = params[0][name + "_gamma"].asnumpy() 
    #     bn_moving_mean = params[1][name + "_moving_mean"].asnumpy()
    #     bn_moving_var = params[1][name + "_moving_var"].asnumpy()
    
    #     mean = tf.Variable(bn_moving_mean, dtype=np.float32, name = name + "_moving_mean")
    #     variance = tf.Variable(bn_moving_var, dtype=np.float32, name = name + "_moving_var")
    #     offset = tf.Variable(bn_beta, dtype=np.float32, name = name + "_beta")
    #     scale = tf.Variable(bn_gamma, dtype=np.float32, name = name + "_gamma")
    #     return mean, variance, offset, scale
    
    # def fc_variable(params, name):
    #     fc_weight = params[0][name + "_weight"].asnumpy()
    #     fc_weight = fc_weight.transpose(1,0)
    #     fc_bias = params[0][name + "_bias"].asnumpy()
    #     fc_weight = tf.Variable(fc_weight, dtype=np.float32, name = name + "_weight")
    #     fc_bias = tf.Variable(fc_bias, dtype=np.float32, name = name + "_bias")
    #     return fc_weight, fc_bias
    
    # def act_variable(params, name):
    #     relu_gamma = params[0][name + "_gamma"].asnumpy() 
    #     print(relu_gamma)
    #     #tf_gamma_data = tf.Variable(relu_gamma, dtype=np.float32)
    #     tf_gamma_data = tf.Variable(relu_gamma, dtype=np.float32, name = name + "_gamma")
    #     return tf_gamma_data
    
    # def conv2d(input, filter, stride, padding, name):
    #     kernel = filter.get_shape()[0]
    #     if padding == 0:
    #         conv = tf.nn.conv2d(input, filter, strides=[1, stride, stride, 1], padding='VALID')
    #     elif stride == 2:
    #         kernel_size = kernel
    #         kernel_size_effective = kernel_size
    #         pad_total = kernel_size_effective - 1
    #         pad_beg = pad_total // 2
    #         pad_end = pad_total - pad_beg
    #         #print(pad_beg, pad_end)
    #         input_PadLayer = tf.pad(input, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]], name=name)
    #         conv = tf.nn.conv2d(input_PadLayer, filter, strides=[1, stride, stride, 1], padding='VALID')
    #     else:
    #         conv = tf.nn.conv2d(input, filter, strides=[1, stride, stride, 1], padding='SAME')
    #     return conv
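    # Note (added): the stride-2 branch above reproduces MXNet/Caffe-style
    # symmetric padding. With an even input size, TF 'SAME' pads asymmetrically
    # (the extra pixel goes to the bottom/right), so the code pads explicitly
    # with pad_beg/pad_end and then convolves with padding='VALID'.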
    
    # def BatchNorm(input, mean, variance, offset, scale, variance_epsilon, name):
    #     # x = tf_prelu
    #     # mean = tf.Variable(bn_moving_mean, dtype=np.float32)
    #     # variance = tf.Variable(bn_moving_var, dtype=np.float32)
    #     # offset = tf.Variable(bn_beta, dtype=np.float32)
    #     # scale = tf.Variable(bn_gamma, dtype=np.float32)
    #     # variance_epsilon = 2e-5
    #     tf_bn = tf.nn.batch_normalization(input, mean,variance,offset,scale,variance_epsilon,name=name)
    #     return tf_bn
    
    # def Act(data, act_type, tf_gamma_data, name):
    #     if act_type=='prelu':
    #       actdata = tf.nn.leaky_relu(data, alpha=tf_gamma_data,name=name)
    #     else:
    #       #acydaya = tf.nn.relu(data = data, act_type=act_type, name=name)
    #       actdata = data
    #     return actdata
    
    # def FullyConnected(input, fc_weight, fc_bias, name):
    #     fc = tf.matmul(input, fc_weight) + fc_bias
    #     return fc
    
    # def max_pool_2x2(x):
    #     return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
    #                           strides=[1, 2, 2, 1], padding='SAME')
    
    # def inference(input_image, params):    
    #     x_image = tf.reshape(input_image, [1, 5, 5, 1])
    
    #     stride = 1; pad = 1; name = "conv0"
    #     W_conv0 = conv_weight_variable(params, name)
    #     body = conv2d(x_image, W_conv0, stride, pad, name)
    
    #     # variance_epsilon = 2e-5;name = "bn0"
    #     # mean, variance, offset, scale = bn_variable(params, name)
    #     # body = BatchNorm(body, mean, variance, offset, scale, variance_epsilon, name)
    
    #     # act_type = 'prelu'; name = "relu0"
    #     # tf_gamma_data = act_variable(params, name)
    #     # body = Act(body, act_type, tf_gamma_data, name)
    
    #     # name = "fc0"
    #     # weight_fc, bias_fc = fc_variable(params, name)
    #     # body = tf.reshape(body, [-1, body.get_shape()[1] * body.get_shape()[2]  * body.get_shape()[3]])
    #     # body = FullyConnected(body, weight_fc, bias_fc, name)
    
    #     return body
    
    # def train(export_dir):
    #     mnist = input_data.read_data_sets("datasets", one_hot=True)
    
    #     g = tf.Graph()
    #     with g.as_default():
    #         x = tf.placeholder("float", shape=[None, 784])
    #         y_ = tf.placeholder("float", shape=[None, 10])
    #         keep_prob = tf.placeholder("float")
    
    #         logits = inference(x, keep_prob)
    #         y_conv = tf.nn.softmax(logits)
    
    #         cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
    #         train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    #         correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    #         accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    
    #         sess = tf.Session()
    #         sess.run(tf.initialize_all_variables())
    
            
    #         step = 200
    #         batch_size = 50
    #         for i in range(step+1):
    #             batch = mnist.train.next_batch(batch_size)
    #             if i % 10 == 0:
    #                 train_accuracy = accuracy.eval(
    #                     {x: batch[0], y_: batch[1], keep_prob: 1.0}, sess)
    #                 print("step %d, training accuracy %g" % (i, train_accuracy))
    #             train_step.run(
    #                 {x: batch[0], y_: batch[1], keep_prob: 0.5}, sess)
    
    #         print("test accuracy %g" % accuracy.eval(
    #             {x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}, sess))
    
    #         saver = tf.train.Saver()
    #         checkpoint_file = os.path.join(export_dir, 'model.ckpt')
    #         saver.save(sess, checkpoint_file, global_step=step)
    #         checkpoint_file = os.path.join(export_dir, 'model.ckpt')
    
    # def export_pb_model(model_name):
    #   graph = tf.Graph()
    #   with graph.as_default():
    #     input_image = tf.placeholder("float", shape=[None,28*28], name='inputdata')
    #     keep_prob = tf.placeholder("float",  name = 'keep_probdata')
    #     logits = inference(input_image, keep_prob)
    #     y_conv = tf.nn.softmax(logits,name='outputdata')
    #     restore_saver = tf.train.Saver()
    
    #   with tf.Session(graph=graph) as sess:
    #     sess.run(tf.global_variables_initializer())
    #     latest_ckpt = tf.train.latest_checkpoint('log')
    #     restore_saver.restore(sess, latest_ckpt)
    #     output_graph_def = tf.graph_util.convert_variables_to_constants(
    #         sess, graph.as_graph_def(), ['outputdata'])
    
    #     #    tf.train.write_graph(output_graph_def, 'log', model_name, as_text=False)
    #     with tf.gfile.GFile(model_name, "wb") as f:  
    #         f.write(output_graph_def.SerializeToString()) 
    
    # def test_pb_model(model_name):
    #     mnist = input_data.read_data_sets("datasets", one_hot=True)
    
    #     with tf.Graph().as_default():
    #         output_graph_def = tf.GraphDef()
    #         output_graph_path = model_name
    #     #    sess.graph.add_to_collection("input", mnist.test.images)
    
    #         with open(output_graph_path, "rb") as f:
    #             output_graph_def.ParseFromString(f.read())
    #             tf.import_graph_def(output_graph_def, name="")
    
    #         with tf.Session() as sess:
    
    #             tf.initialize_all_variables().run()
    #             input_x = sess.graph.get_tensor_by_name("inputdata:0")        
    #             output = sess.graph.get_tensor_by_name("outputdata:0")
    #             keep_prob = sess.graph.get_tensor_by_name("keep_probdata:0")
    
    #             y_conv_2 = sess.run(output,{input_x:mnist.test.images, keep_prob: 1.0})
    #             print( "y_conv_2", y_conv_2)
    
    #             # Test trained model
    #             #y__2 = tf.placeholder("float", [None, 10])
    #             y__2 = mnist.test.labels
    #             correct_prediction_2 = tf.equal(tf.argmax(y_conv_2, 1), tf.argmax(y__2, 1))
    #             print ("correct_prediction_2", correct_prediction_2 )
    #             accuracy_2 = tf.reduce_mean(tf.cast(correct_prediction_2, "float"))
    #             print ("accuracy_2", accuracy_2)
    
    #             print ("check accuracy %g" % accuracy_2.eval())
    
    # def showmodel():
    #     result =  load_param() 
    #     #pdb.set_trace()  
    #     print('result is', result)  
    #     #print result
    #     for dic in result:
    #         #dic = sorted(dic.keys())
    #         for key in sorted(dic.keys()):
    #             print(key, dic[key].shape)
    #             #print(key,dic[key])
    
    #     print('one of results conv0_weight is:')
    #     conv0_weight = result[0]['conv0_weight'].asnumpy()
    #     print('name:',conv0_weight.shape)
    #     print('value:',result[0]['conv0_weight'].asnumpy()) 
    
    # def get_netpoint_params(params, name):
    #     # (u'bn0_beta', (2L,))
    #     # (u'bn0_gamma', (2L,))
    #     # (u'conv0_weight', (2L, 1L, 3L, 3L))
    #     # (u'fc2_bias', (10L,))
    #     # (u'fc2_weight', (10L, 1568L))
    #     # (u'relu0_gamma', (2L,))
    #     # (u'bn0_moving_mean', (2L,))
    #     # (u'bn0_moving_var', (2L,))
    #     name_split = name.strip().split('_')
    #     train_variable = ['conv','bn','relu','fc']
    #     for var in train_variable:
    #         index = var.find('a',0) 
    #         if index != -1:
    #             continue
    
    #     if index == -1:
    #         print("netpoint name is error")
    #     else:
    #         if index == 0:
    #             conv_weight = params[0][name + "_weight"].asnumpy()
    #         elif index == 1:
    #             bn_beta = params[0][name + "_beta"].asnumpy() 
    #             bn_gamma = params[0][name + "_gamma"].asnumpy() 
    #             bn_moving_mean = params[1][name + "_moving_mean"].asnumpy()
    #             bn_moving_var = params[1][name + "_moving_var"].asnumpy()
    #         elif index == 2:
    #             relu_gamma = params[0][name + "_gamma"].asnumpy() 
    #         elif index == 3:
    #             fc_weight = params[0][name + "_weight"].asnumpy() 
    #             fc_bias = params[0][name + "_bias"].asnumpy() 
    
    # def softmax(x):
    #     """
    #     Compute the softmax function for each row of the input x.
    
    #     Arguments:
    #     x -- A N dimensional vector or M x N dimensional numpy matrix.
    
    #     Return:
    #     x -- You are allowed to modify x in-place
    #     """
    #     orig_shape = x.shape
    
    #     if len(x.shape) > 1:
    #         # Matrix
    #         exp_minmax = lambda x: np.exp(x - np.max(x))
    #         denom = lambda x: 1.0 / np.sum(x)
    #         x = np.apply_along_axis(exp_minmax,1,x)
    #         denominator = np.apply_along_axis(denom,1,x) 
            
    #         if len(denominator.shape) == 1:
    #             denominator = denominator.reshape((denominator.shape[0],1))
            
    #         x = x * denominator
    #     else:
    #         # Vector
    #         x_max = np.max(x)
    #         x = x - x_max
    #         numerator = np.exp(x)
    #         denominator =  1.0 / np.sum(numerator)
    #         x = numerator.dot(denominator)
            
    #     assert x.shape == orig_shape
    #     return x
    
    def eval():
        #mxnet_params = load_param()
    
        # label = 9
        # img = cv2.imread('./mnist_img/mnist_train_8.jpg',0)
        # img = cv2.resize(img,(5,5))
        # img = np.expand_dims(img, axis=0)
        # img = np.expand_dims(img, axis=0)
        # data = np.arange(5 * 5 * 1).reshape((1,1,5,5))
        # data = data.transpose(0,3,2,1)
        # input_image = tf.Variable(data, dtype=np.float32)
        # print('img',img)
        # #logdit = inference(input_image, mxnet_params)
        # #x_image = tf.reshape(input_image, [1, 5, 5, 1])
        # stride = 1; pad = 1; name = "conv0"
        # W_conv0 = conv_weight_variable(mxnet_params, name)
        # body = conv2d(input_image, W_conv0, stride, pad, name)
        # init = tf.global_variables_initializer()
        # with tf.Session() as sess:
        #     sess.run(init)
        #     fc = sess.run(body)
        #     print(fc)
        # with tf.Graph().as_default():
        #     #with tf.Session() as sess:
        #     input_image = tf.placeholder("float", [1, 1, 28, 28])
        #     logdit = inference(input_image, mxnet_params)
        #     init = tf.global_variables_initializer()
        #     with tf.Session() as sess:
        #         sess.run(init)
        #         # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
        #         # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        #         sess.run(tf.initialize_all_variables())
        #         fc = sess.run(logdit,{input_image:img})
            
            # print( "fc", fc)
            # softmax_fc = softmax(fc)
            # print(softmax_fc)
            # max_index = np.argmax(softmax_fc, axis=1)
            # print("label ", label)
        # print("predict ", max_index)
            # print(max_index == label)
        height = 5
        width = 5
        inchannel = 1
        outchannel = 2
    
        #conv0 (64, 112, 112) kernel (3, 3) stride (1, 1) pad (1, 1)
        wkernel = 3
        stride = 1
        pad = 1
        dilate  = 1
    
        # w = np.arange(wkernel * wkernel * inchannel * outchannel).reshape((outchannel,inchannel,wkernel,wkernel))
        # b = np.array([0])
        mxnet_params = 1  # placeholder: load_param() is commented out above and the value is unused
        w = conv_weight_variable(mxnet_params,"conv0")
        data = np.arange(height * width * inchannel).reshape((1,inchannel,height,width))
        print('input:',data)
        print('weight:',w)
    
        data = data.transpose(0,3,2,1)
        #w = w.transpose(3,2,1,0)
        # print('input:',data)
        # print('inputshape:',data.shape)
        # print('weight:',w)
        # print('weight:',w.shape)
        input = tf.Variable(data, dtype=np.float32)
        #input_reshape = tf.reshape(input, [1,inchannel,height,width])
        # conv_weight_variable() already returns a tf.Variable, so use it
        # directly instead of wrapping it in a second tf.Variable
        filter = w
    
        conv = tf.nn.conv2d(input, filter, strides=[1, stride, stride, 1], padding='SAME')
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            #print("input: \n", sess.run(input))
            input_reshape = sess.run(input).transpose(0,3,2,1)
            #print("input_reshape: \n", input_reshape)
            #print("filter: \n", sess.run(filter))
            filter_reshape = sess.run(filter).transpose(3,2,1,0)
            #print("filter_reshape: \n", filter_reshape)
            #print("conv ", sess.run(conv))
            conv_reshape = sess.run(conv).transpose(0,3,2,1)
            print("conv_reshape: \n", conv_reshape)

            # tf_prelu_reshape = sess.run(tf_prelu).transpose(0,3,2,1)
            # print("tf_prelu_reshape: \n", tf_prelu_reshape)

            # tf_bn_reshape = sess.run(tf_bn).transpose(0,3,2,1)
            # print("tf_bn_reshape: \n", tf_bn_reshape)
    
    
    if __name__ == '__main__':
    
        eval()
        #showmodel()
        #load mxnet model
        #mxnet_params = load_param()
        # import re
        # print(re.search('www', 'www.runoob.com').span())  # match at the start of the string
        # print(re.search('com', 'www.runoob.com').span())  # match not at the start of the string
    
        # info = 'abca'
        # print(info.find('a'))    # search from index 0 for the first occurrence of the substring; returns 0

        # info = 'abca'
        # print(info.find('a',1))  # search from index 1; returns 3

        # info = 'abca'
        # print(info.find('333'))  # returns -1 when the substring is not found
    
    
        # export_dir = './log'
        # if os.path.exists(export_dir):
        #     shutil.rmtree(export_dir)
        #train and save the model checkpoint (ckpt)
        #train(export_dir)
        #model_name = os.path.join(export_dir, 'mnist.pb')
        # #convert the ckpt checkpoint to a frozen pb model
        # export_pb_model(model_name)
        # #test the pb model
        # test_pb_model(model_name)
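
    # ------------------------------------------------------------------
    # Added cross-check (a sketch, not part of the original script): the same
    # 3x3, stride-1, pad-1 convolution computed directly in MXNet's native
    # NCHW layout with plain NumPy; its output should match the conv_reshape
    # value printed by eval() above. conv2d_nchw is a helper defined here
    # only for this sketch.
    import numpy as np

    def conv2d_nchw(data, weight, stride=1, pad=1):
        # naive NCHW convolution, for verification only
        n, c, h, w = data.shape
        o, i, kh, kw = weight.shape
        assert c == i
        padded = np.pad(data, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
        oh = (h + 2 * pad - kh) // stride + 1
        ow = (w + 2 * pad - kw) // stride + 1
        out = np.zeros((n, o, oh, ow), dtype=data.dtype)
        for oc in range(o):
            for y in range(oh):
                for x in range(ow):
                    patch = padded[:, :, y*stride:y*stride+kh, x*stride:x*stride+kw]
                    out[:, oc, y, x] = (patch * weight[oc]).sum(axis=(1, 2, 3))
        return out

    w = np.arange(3 * 3 * 1 * 2, dtype=np.float32).reshape((2, 1, 3, 3))
    data = np.arange(5 * 5, dtype=np.float32).reshape((1, 1, 5, 5))
    print(conv2d_nchw(data, w))  # should match the conv_reshape printed above

    # ------------------------------------------------------------------
    # Second script: trains the same small network (conv0/bn0/relu0/fc0) in
    # MXNet, saves the checkpoint, and loads it back for inspection and
    # prediction.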
    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function
    
    from scipy import misc
    import sys
    import os
    import argparse
    import tensorflow as tf
    import numpy as np
    import mxnet as mx
    import random
    import cv2
    from time import sleep
    from easydict import EasyDict as edict
    import logging  
    import pdb  
    import math
    
    def load_checkpoint():
        """
        Load the model checkpoint from 'model-0000.params'.
        :return: (arg_params, aux_params)
        arg_params : dict of str to NDArray
            Model parameters: dict of name to NDArray of the net's weights.
        aux_params : dict of str to NDArray
            Model parameters: dict of name to NDArray of the net's auxiliary states.
        """
        save_dict = mx.nd.load('model-0000.params')  
        arg_params = {}  
        aux_params = {}  
        for k, v in save_dict.items():  
            tp, name = k.split(':', 1)  
            if tp == 'arg':  
                arg_params[name] = v  
            if tp == 'aux':  
                aux_params[name] = v  
        return arg_params, aux_params  
    
    def convert_context(params, ctx):  
        """ 
        :param params: dict of str to NDArray 
        :param ctx: the context to convert to 
        :return: dict of str of NDArray with context ctx 
        """  
        new_params = dict()  
        for k, v in params.items():  
            new_params[k] = v.as_in_context(ctx)  
        #print new_params[0]  
        return new_params  
      
    def load_param(convert=False, ctx=None):
        """
        Wrapper for load_checkpoint.
        :param convert: if True, convert the loaded NDArrays to the given context.
        :param ctx: target context when convert is True; defaults to mx.cpu().
        :return: (arg_params, aux_params)
        """
        arg_params, aux_params = load_checkpoint()  
        if convert:  
            if ctx is None:  
                ctx = mx.cpu()  
            arg_params = convert_context(arg_params, ctx)  
            aux_params = convert_context(aux_params, ctx)  
        return arg_params, aux_params  
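    # typical usage: arg_params, aux_params = load_param()  # reads ./model-0000.params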
    
    def srctrain():
        logging.getLogger().setLevel(logging.DEBUG)  
          
        batch_size = 100  
        mnist = mx.test_utils.get_mnist()  
        train_iter = mx.io.NDArrayIter(mnist['train_data'], mnist['train_label'], batch_size, shuffle=True)  
        val_iter = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'], batch_size)  
          
        data = mx.sym.var('data')   
        # first conv layer  
        conv1= mx.sym.Convolution(data=data, kernel=(5,5), num_filter=20)  
        tanh1= mx.sym.Activation(data=conv1, act_type="tanh")  
        pool1= mx.sym.Pooling(data=tanh1, pool_type="max", kernel=(2,2), stride=(2,2))  
        # second conv layer  
        conv2= mx.sym.Convolution(data=pool1, kernel=(5,5), num_filter=50)  
        tanh2= mx.sym.Activation(data=conv2, act_type="tanh")  
        pool2= mx.sym.Pooling(data=tanh2, pool_type="max", kernel=(2,2), stride=(2,2))  
        # first fullc layer  
        flatten= mx.sym.Flatten(data=pool2)  
        fc1= mx.symbol.FullyConnected(data=flatten, num_hidden=500)  
        tanh3= mx.sym.Activation(data=fc1, act_type="tanh")  
        # second fullc  
        fc2= mx.sym.FullyConnected(data=tanh3, num_hidden=10)  
        # softmax loss  
        lenet= mx.sym.SoftmaxOutput(data=fc2, name='softmax')  
          
        # create a trainable module on the CPU
        lenet_model = mx.mod.Module(  
                        symbol=lenet,   
                        context=mx.cpu())  
          
        # train the module
        lenet_model.fit(train_iter,  
                        eval_data=val_iter,  
                        optimizer='sgd',  
                        optimizer_params={'learning_rate':0.1},  
                        eval_metric='acc',  
                        batch_end_callback = mx.callback.Speedometer(batch_size, 100),  
                        num_epoch=2)  
    
    
        #save model
        msave = 0
        print('saving', msave)
        arg, aux = lenet_model.get_params()
        prefix = "./model"
        mx.model.save_checkpoint(prefix, msave, lenet_model.symbol, arg, aux)
    
    def Act(data, act_type, name):
        if act_type=='prelu':
          body = mx.sym.LeakyReLU(data = data, act_type='prelu', name = name)
        else:
          body = mx.symbol.Activation(data=data, act_type=act_type, name=name)
        return body
    
    # input_size is set in resnettrain() and updated as a side effect by Conv()
    global input_size
    def out_dim(input = (3,112,112), num_filter = 64,
      kernel = (3,3), stride = (1,1), pad = (1,1), dilate = (1,1), name="point", isconv=True):
      if isconv==True:
        if pad is None:
            pad=(0,0)
        channels = input[0]
        height = input[1]
        width = input[2]
        x = height
        p = pad[0]
        s = stride[0]
        d = dilate[0]
        k = kernel[0]
        output_height = int(math.floor((x + 2 * p - d * (k - 1) - 1) / s) + 1)
        y = width
        p = pad[1]
        s = stride[1]
        d = dilate[1]
        k = kernel[1]
        output_width = int(math.floor((y + 2 * p - d * (k - 1) - 1) / s) + 1)
        output = (num_filter, output_height, output_width)
      else:
        output = input
      print(name, output, "kernel",kernel, "stride", stride, "pad", pad)
      return output
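    # Worked example of the formula above (values taken from resnettrain below):
    # a 3x3 conv with stride 1, pad 1, dilate 1 on a 28x28 input keeps the size:
    #   floor((28 + 2*1 - 1*(3-1) - 1) / 1) + 1 = 28
    # so out_dim((1,28,28), num_filter=2) -> (2, 28, 28).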
    
    def Conv(**kwargs):
        #name = kwargs.get('name')
        #_weight = mx.symbol.Variable(name+'_weight')
        #_bias = mx.symbol.Variable(name+'_bias', lr_mult=2.0, wd_mult=0.0)
        #body = mx.sym.Convolution(weight = _weight, bias = _bias, **kwargs)
        name = kwargs.get('name')
        num_filter = kwargs.get('num_filter')
        kernel = kwargs.get('kernel')
        stride = kwargs.get('stride')
        pad = kwargs.get('pad')
        global input_size
        input_size = out_dim(input = input_size, num_filter = num_filter,
                              kernel=kernel, stride=stride, pad=pad, dilate = (1,1), name=name, isconv=True)
        body = mx.sym.Convolution(**kwargs)
        return body
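    # Conv() both builds the convolution symbol and advances the global
    # input_size shape tracker; usage as in resnettrain() below:
    #   body = Conv(data=data, num_filter=2, kernel=(3,3), stride=(1,1),
    #               pad=(1,1), no_bias=True, name="conv0")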
    
    def resnettrain():
        logging.getLogger().setLevel(logging.DEBUG)  
          
        batch_size = 100  
        bn_mom = 0.9
        act_type = 'prelu'
        global input_size
        input_size = (1,28,28)
    
        mnist = mx.test_utils.get_mnist()  
        train_iter = mx.io.NDArrayIter(mnist['train_data'], mnist['train_label'], batch_size, shuffle=True)  
        val_iter = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'], batch_size)  
        
        #label = mx.sym.var('softmax_label')
        data = mx.sym.var('data')   
        # first conv layer  
        body = Conv(data=data, num_filter=2, kernel=(3,3), stride=(1,1), pad=(1, 1),
                                    no_bias=True, name="conv0")
        body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
        print('bn0', input_size)
        body = Act(data=body, act_type=act_type, name='relu0')
        print('relu0', input_size)
    
    
        # # second conv layer  
        # body = Conv(data=body, num_filter=4, kernel=(3,3), stride=(2,2), pad=(1, 1),
        #                             no_bias=True, name="conv1")
        # body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
        # print('bn1', input_size)
        # body = Act(data=body, act_type=act_type, name='relu1')
        # print('relu1', input_size)
    
        # # third conv layer  
        # body = Conv(data=body, num_filter=6, kernel=(1,1), stride=(2,2), pad=(0, 0),
        #                             no_bias=True, name="conv2")
        # body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn2')
        # print('bn2', input_size)
        # body = Act(data=body, act_type=act_type, name='relu2')
        # print('relu2', input_size)
    
        # first fullc layer  
        #body= mx.sym.Flatten(data=body)  
        #body= mx.symbol.FullyConnected(data=body, num_hidden=20, name='fc1')  
        #body= mx.sym.Activation(data=body, act_type="tanh", name="Act1")  
        #body= Act(data=body, act_type=act_type, name='relu1') 
        # second fullc  
        body= mx.sym.FullyConnected(data=body, num_hidden=10, name='fc0')  
        #softmax loss  
        lenet= mx.sym.SoftmaxOutput(data=body, name='softmax')  
          
        # create a trainable module on the CPU
        lenet_model = mx.mod.Module(  
                        symbol=lenet,   
                        context=mx.cpu())  
          
        # train the module
        lenet_model.fit(train_iter,  
                        eval_data=val_iter,  
                        optimizer='sgd',  
                        optimizer_params={'learning_rate':0.1},  
                        eval_metric='acc',  
                        batch_end_callback = mx.callback.Speedometer(batch_size, 100),  
                        num_epoch=2)  
    
        #save model
        msave = 0
        print('saving', msave)
        arg, aux = lenet_model.get_params()
        prefix = "./model"
        mx.model.save_checkpoint(prefix, msave, lenet_model.symbol, arg, aux)
    
    def showmodel():
        result =  load_param() 
        #pdb.set_trace()  
        #print('result is', result)
        #print result
        for dic in result:
            #dic = sorted(dic.keys())
            for key in sorted(dic.keys()):
                print(key, dic[key].shape)
                #print(key,dic[key])
    
        print('one of results conv0_weight is:')
        conv0_weight = result[0]['conv0_weight'].asnumpy()
        print('name:',conv0_weight.shape)
        print('value:',result[0]['conv0_weight'].asnumpy())  
    
    class MnistModel:
        def __init__(self, args):
            self.args = args
            model = edict()
            _vec = args.model.split(',')
            assert len(_vec)==2
            prefix = _vec[0]
            epoch = int(_vec[1])
            print('loading',prefix, epoch)
            ctx = mx.gpu(args.gpu)
            sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
            all_layers = sym.get_internals()
            sym = all_layers['fc0_output']
            model = mx.mod.Module(symbol=sym, context=ctx, label_names = None)
            image_size = (28,28)
            #model.bind(data_shapes=[('data', (1, 1, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (1,))])
            model.bind(data_shapes=[('data', (1, 1, image_size[0], image_size[1]))])
            model.set_params(arg_params, aux_params)
            self.model = model

        def get_feature(self, img):
            # img is a grayscale image already shaped (1, 28, 28)
            #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            #aligned = np.transpose(img, (2,0,1))
            aligned = img

            #print(nimg.shape)
            embedding = None
            input_blob = np.expand_dims(aligned, axis=0)
            data = mx.nd.array(input_blob)
            #label = mx.nd.array((1,))
            db = mx.io.DataBatch(data=(data,))
            self.model.forward(db, is_train=False)
            _embedding = self.model.get_outputs()[0].asnumpy()
            #print(_embedding.shape)
            embedding = _embedding
            return embedding
    
    def softmax(x):
        """
        Compute the softmax function for each row of the input x.
    
        Arguments:
        x -- an N-dimensional vector or an M x N numpy matrix.

        Return:
        x -- the softmax output (x may be modified in place)
        """
        orig_shape = x.shape
    
        if len(x.shape) > 1:
            # Matrix
            exp_minmax = lambda x: np.exp(x - np.max(x))
            denom = lambda x: 1.0 / np.sum(x)
            x = np.apply_along_axis(exp_minmax,1,x)
            denominator = np.apply_along_axis(denom,1,x) 
            
            if len(denominator.shape) == 1:
                denominator = denominator.reshape((denominator.shape[0],1))
            
            x = x * denominator
        else:
            # Vector
            x_max = np.max(x)
            x = x - x_max
            numerator = np.exp(x)
            denominator =  1.0 / np.sum(numerator)
            x = numerator.dot(denominator)
            
        assert x.shape == orig_shape
        return x
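    # Example with illustrative values:
    #   softmax(np.array([[1., 2., 3., 4.]]))
    #   -> [[0.0321, 0.0871, 0.2369, 0.6439]]  (each row sums to 1)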
    
    def eval():
        parser = argparse.ArgumentParser(description='mnist model test')
        # general
        parser.add_argument('--image-size', default='28,28', help='input image size as h,w')
        parser.add_argument('--model', default='./model,0', help='path to load model.')
        parser.add_argument('--gpu', default=0, type=int, help='gpu id')
        args = parser.parse_args()
    
        model = MnistModel(args)
    
        label = 9
        img = cv2.imread('./mnist_img/mnist_train_8.jpg',0)
        img = cv2.resize(img,(28,28))
        img = np.expand_dims(img, axis=0)
        f1 = model.get_feature(img)
        print(f1.shape)
        print(f1)
        softmax_f1 = softmax(f1)
        print(softmax_f1)
        max_index = np.argmax(softmax_f1, axis=1)
        print("label ", label)
        print("predict ", max_index)
        print(max_index == label)
    
    def rgb2gray(rgb):
        return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
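    # note: expects RGB channel order; cv2.imread returns BGR, so reverse the
    # channels first, e.g. rgb2gray(img[..., ::-1])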
    
    if __name__ == '__main__':
        #resnettrain()
        showmodel()
        #eval()
        # a = np.array([[1,2,3,4]])
        # print(a.shape)
        # #a = np.array([[1,2,3,4],[1,2,3,4]])
        # print(softmax(a))
        # y_hat = a
        # Y=np.array([[0,0,0,1]])
        # max_index = np.argmax(y_hat, axis=1)
        # print(max_index)
        # y_hat[np.arange(y_hat.shape[0]), max_index] = 1
        # print(y_hat)
        # accuracy = np.argmax(y_hat, axis=1)==np.argmax(Y, axis=1) 
        # print(accuracy)
    
        # img = cv2.imread('./mnist_img/mnist_train_10.jpg',0)
        # print(img.shape)
        # #img.reshape((1,28,28))
        # #img = rgb2gray(img)
        # print(img)
        # img = np.expand_dims(img, axis=0)
        # #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # print(img.shape)
        # print(img)
  • Original source: https://www.cnblogs.com/adong7639/p/9222408.html