    A code example for caffe feature visualization

    Quite a few readers of my previous two posts,

    "A summary of the workflow for running image data through caffe"

    "Deep learning practice notes 2 -- accuracy improved again, reaching 0.8; another summary"

    have asked how I implemented the feature visualization.


    In short, the idea is simply to run one forward pass through the network, pull out the feature values of a chosen layer, and convert them into images to save.

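    To make the "convert to image" step concrete, here is a minimal, hypothetical sketch (the helper name save_feature_map is my own and is not part of the demo below): it min-max scales one channel of raw float feature values to [0, 255] and writes it as a grayscale PNG with OpenCV.

    #include <algorithm>
    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>

    // Min-max normalize one height x width feature map to 8 bits and save it.
    void save_feature_map(const float* data, int height, int width, const char* path)
    {
        const float* end = data + height * width;
        float min_v = *std::min_element(data, end);
        float max_v = *std::max_element(data, end);
        float range = (max_v > min_v) ? (max_v - min_v) : 1.0f; // avoid division by zero

        cv::Mat img(height, width, CV_8UC1);
        for (int row = 0; row < height; ++row)
            for (int col = 0; col < width; ++col)
                img.at<uchar>(row, col) = static_cast<uchar>(
                    255.0f * (data[row * width + col] - min_v) / range);

        cv::imwrite(path, img); // the ".png" extension tells OpenCV the format
    }

    The writeBatch() function in the full source below is essentially this, plus the bookkeeping that turns the batch index and class name into a filename.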

    Below is a demo that you can modify to fit your own needs.


    First, here is how the demo is invoked.

    visualize_features.bin net_proto pretrained_net_proto iterations  [CPU/GPU]  img_list_file dstdir laydepth

    visualize_features.bin is the executable built from the cpp file below.

    The parameters mean the following:

    1 net_proto: the network definition file in caffe's text format, with the ".prototxt" suffix.

    It specifies the network's input, the related parameters, and the overall network structure.

    2 pretrained_net_proto: the already-trained model (a binary snapshot of the weights).

    3 iterations: the number of forward iterations (batches) to run.

    4 [CPU/GPU]: whether to run in CPU or GPU mode.

    5 img_list_file: the list of test file names; I need it here mainly to recover each image's class name.

    6 dstdir: the directory the output images are written to (one grayscale PNG per channel of each input image, named from the class name, global image id and channel index).

    7 laydepth: which layer's features to dump; see the snippet right after this list for how it is used.
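
    For reference, laydepth is used inside the program as an index into the test net's bottom_vecs(), i.e. it selects the bottom blob of the laydepth-th layer. The relevant line from main() below looks like this:

    // old Caffe API, as used in the demo: pick the blob feeding layer number laydepth
    Blob<float>* features = (*(caffe_test_net.bottom_vecs().begin()+laydepth))[0];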


    Here is a concrete example:

    ./visualize_features.bin /home/linger/linger/caffe-action/caffe-master/examples/cifar10/cifar10_full_test.prototxt /home/linger/linger/caffe-action/caffe-master/examples/cifar10/cifar10_full_iter_60000 20 GPU /home/linger/linger/testfile/skirt_test_attachment/image_filename /home/linger/linger/testfile/innerproduct/ 7


    And here is the source code:

    // Copyright 2013 Yangqing Jia
    //
    // This is a simple script that allows one to quickly run a network whose
    // structure is specified by text format protocol buffers, and whose parameters
    // are loaded from a pre-trained network, and dump one layer's features as images.
    // Usage:
    //    visualize_features.bin net_proto pretrained_net_proto iterations [CPU/GPU] img_list_file dstdir laydepth
    
    #include <cuda_runtime.h>
    #include <fstream>
    #include <iostream>
    #include <cstring>
    #include <cstdlib>
    #include <cfloat>  // FLT_MAX, used when scanning for each channel's min/max
    #include <algorithm>
    #include <vector>
    #include <utility>
    #include "caffe/caffe.hpp"
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/highgui/highgui_c.h>
    #include <opencv2/imgproc/imgproc.hpp>
    
    using std::make_pair;
    using std::pair;
    using namespace caffe;  // NOLINT(build/namespaces)
    using namespace std;
    
    vector<string> fileNames;
    char * filelist;
    
    /*
     * The list file read here has one entry per line, in the form: global_id className_idWithinClass.jpg
     * For example:
    0 一步裙_0.jpg
    1 一步裙_1.jpg
    2 一步裙_10.jpg
     */
    void readFile()
    {
    	if(fileNames.empty())
    	{
    		ifstream read(filelist);
    		//"/home/linger/linger/testfile/test_attachment/image_filename"
    		// "/home/linger/imdata/test_files_collar.txt"
    		//  "/home/linger/linger/testfile/testfilename"
    		if(read.is_open())
    		{
    			string name;
    			int id;
    			// read "id name" pairs until extraction fails; looping on eof()
    			// would push the last entry twice
    			while(read>>id>>name)
    			{
    				fileNames.push_back(name);
    			}
    		}
    	}
    }
    
    /*
     * Get the class name for an image from its global id,
     * e.g. "一步裙_10.jpg" -> "一步裙" (everything before the last '_').
     */
    string getClassNameById(int id)
    {
    	readFile();
    	int index = fileNames[id].find_last_of('_') ;
    	return fileNames[id].substr(0, index);
    }
    
    
    
    void writeBatch(const float* data,int num,int channels,int width,int height,int startID,const char*dir)
    {
    	for(int id = 0;id<num;id++)
    	{
    		for(int channel=0;channel<channels;channel++)
    		{
    			cv::Mat mat(height,width, CV_8UC1);// rows = height, cols = width
    			vector<vector<float> > vec;
    			vec.resize(height);
    			// track this channel's min and max so it can be scaled to [0,255]
    			float max = -FLT_MAX;
    			float min = FLT_MAX;
    			for(int row=0;row<height;row++)
    			{
    				vec[row].resize(width);
    				for(int col=0;col<width;col++)
    				{
    					vec[row][col] =
    							data[id*channels*width*height+channel*width*height+row*width+col];
    					if(max<vec[row][col])
    					{
    						max = vec[row][col];
    					}
    					if(min>vec[row][col])
    					{
    						min = vec[row][col];
    					}
    
    
    				}
    			}
    
    			// scale each value to [0,255] relative to the channel's min/max
    			float range = (max > min) ? (max - min) : 1.0f;// avoid dividing by zero on a constant map
    			for(int row=0;row<height;row++)
    			{
    				for(int col=0;col<width;col++)
    				{
    					vec[row][col] = 255*((float)(vec[row][col]-min))/range;
    					uchar& img = mat.at<uchar>(row,col);
    					img= vec[row][col];
    
    				}
    			}
    			char filename[100];
    			string label = getClassNameById(startID+id);
    			string file_reg =dir;
    			file_reg+="%s%05d_%05d.png";
    			snprintf(filename, 100, file_reg.c_str(), label.c_str(),startID+id,channel);
    			//printf("%s
    ",filename);
    			cv::imwrite(filename, mat);
    		}
    
    	}
    }
    
    int main(int argc, char** argv)
    {
      if (argc < 8)// all seven arguments are required (argv[5]..argv[7] are used below)
      {
        LOG(ERROR) << "visualize_features.bin net_proto pretrained_net_proto iterations "
            << "[CPU/GPU] img_list_file dstdir laydepth";
        return 0;
      }
      /*
    
      ./visualize_features.bin /home/linger/linger/caffe-action/caffee-ext/Caffe_MM/prototxt/triplet/triplet_test_simple.prototxt /home/linger/linger/caffe-action/caffee-ext/Caffe_MM/snapshorts/_iter_100000 8 GPU /home/linger/linger/testfile/test_attachment/image_filename /home/linger/linger/testfile/innerproduct/ 6
    
      */
    
      filelist = argv[5];
      cudaSetDevice(0);
      Caffe::set_phase(Caffe::TEST);
    
      if (strcmp(argv[4], "GPU") == 0)// argc is already checked above, so just test the mode string
      {
        LOG(ERROR) << "Using GPU";
        Caffe::set_mode(Caffe::GPU);
      }
      else
      {
        LOG(ERROR) << "Using CPU";
        Caffe::set_mode(Caffe::CPU);
      }
    
      NetParameter test_net_param;
      ReadProtoFromTextFile(argv[1], &test_net_param);
      Net<float> caffe_test_net(test_net_param);
      NetParameter trained_net_param;
      ReadProtoFromBinaryFile(argv[2], &trained_net_param);
      caffe_test_net.CopyTrainedLayersFrom(trained_net_param);
    
      int total_iter = atoi(argv[3]);
      LOG(ERROR) << "Running " << total_iter << " Iterations.";
    
      double test_accuracy = 0;
      vector<Blob<float>*> dummy_blob_input_vec;
    
      int startID = 0;
      int nums;
      int dims;
      int batchsize = test_net_param.layers(0).layer().batchsize();
    
      int laynum = caffe_test_net.bottom_vecs().size();
      printf("num of layers:%d
    ",laynum);
    
      for (int i = 0; i < total_iter; ++i)
      {
        const vector<Blob<float>*>& result =
            caffe_test_net.Forward(dummy_blob_input_vec);
    
        int laydepth = atoi(argv[7]);
    
        Blob<float>* features = (*(caffe_test_net.bottom_vecs().begin()+laydepth))[0];// change laydepth to dump a different layer
    
        nums = features->num();
        dims= features->count()/features->num();
    
        int num = features->num();
        int channels = features->channels();
        int width = features->width();
        int height = features->height();
        printf("channels:%d,%d,height:%d
    ",channels,width,height);
        writeBatch(features->cpu_data(),num,channels,width,height,startID,argv[6]);
        startID += nums;
    
      }
    
      return 0;
    }
    






Original post: https://www.cnblogs.com/yxysuanfa/p/7380573.html