  【caffe Net】A usage example and annotated source code

     First, a small example of using Net:

    #include <vector>
    #include <iostream>
    #include <caffe/net.hpp>
    using namespace std;
    using namespace caffe;

    int main()
    {
        std::string proto("./bambootry/deploy.prototxt");
        Net<float> nn(proto, caffe::TEST);
        vector<string> bn = nn.blob_names();  // names of all Blob objects in the Net
        for (size_t i = 0; i < bn.size(); ++i)
        {
            cout << "Blob #" << i << " : " << bn[i] << endl;
        }
        return 0;
    }

    Compiling on Linux (bambootry is a directory the author created):

    g++ -o ./bambootry/netapp ./bambootry/net.cpp -I ./include -D CPU_ONLY \
        -I ./.build_release/src/ -L ./build/lib -lcaffe -lglog -lboost_system \
        -lprotobuf
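
    To run the resulting binary from the Caffe root directory, the dynamic linker may also need to find libcaffe.so, e.g.:

    export LD_LIBRARY_PATH=./build/lib:$LD_LIBRARY_PATH  # so libcaffe.so is found at run time
    ./bambootry/netapp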

    Result:

    …… (middle of the output omitted)

    (remaining output omitted) ……
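
    Beyond listing blob names, the same Net object can run inference. Below is a minimal sketch: it assumes the deploy network declares at least one input blob (via input/input_shape as in typical deploy prototxts), and the .caffemodel path is a placeholder for whatever trained weights you have:

    #include <vector>
    #include <iostream>
    #include <caffe/net.hpp>
    using namespace caffe;

    int main()
    {
        Net<float> nn("./bambootry/deploy.prototxt", caffe::TEST);
        nn.CopyTrainedLayersFrom("./bambootry/model.caffemodel");  // placeholder path

        // input_blobs() returns pointers into the net's own storage (blobs_),
        // so writing through mutable_cpu_data() feeds the network directly.
        Blob<float>* input = nn.input_blobs()[0];
        float* data = input->mutable_cpu_data();
        for (int i = 0; i < input->count(); ++i)
            data[i] = 0.0f;  // dummy input; a real caller copies image data here

        float loss = 0;
        const std::vector<Blob<float>*>& out = nn.Forward(&loss);
        std::cout << "output blobs: " << out.size()
                  << ", first output count: " << out[0]->count() << std::endl;
        return 0;
    }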

    Annotated source code

     NetParameter in src/caffe/proto/caffe.proto

    message NetParameter {
      optional string name = 1;  // consider giving the network a name
      // DEPRECATED. See InputParameter. The input blobs to the network.
      repeated string input = 3;  // names of the net's input blobs; there may be several
      // DEPRECATED. See InputParameter. The shape of the input blobs.
      repeated BlobShape input_shape = 8;

      // 4D input dimensions -- deprecated.  Use "input_shape" instead.
      // If specified, for each input blob there should be four
      // values specifying the num, channels, height and width of the input blob.
      // Thus, there should be a total of (4 * #input) numbers.
      repeated int32 input_dim = 4;  // legacy 4-D shape info

      // Whether the network will force every layer to carry out backward operation.
      // If set False, then whether to carry out backward is determined
      // automatically according to the net structure and learning rates.
      optional bool force_backward = 5 [default = false];
      // The current "state" of the network, including the phase, level, and stage.
      // Some layers may be included/excluded depending on this state and the states
      // specified in the layers' include and exclude fields.
      optional NetState state = 6;

      // Print debugging information about results while running Net::Forward,
      // Net::Backward, and Net::Update.
      optional bool debug_info = 7 [default = false];

      // The layers that make up the net.  Each of their configurations, including
      // connectivity and behavior, is specified as a LayerParameter.
      repeated LayerParameter layer = 100;  // ID 100 so layers are printed last.

      // DEPRECATED: use 'layer' instead.
      repeated V1LayerParameter layers = 2;
    }
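
    As a concrete illustration of these fields, a minimal deploy-style prototxt could look like the following (the names and dimensions are invented; input/input_shape are the deprecated fields still widely used in deploy files):

    name: "TinyNet"
    input: "data"
    input_shape { dim: 1 dim: 3 dim: 28 dim: 28 }
    layer {
      name: "ip1"
      type: "InnerProduct"
      bottom: "data"
      top: "ip1"
      inner_product_param { num_output: 10 }
    }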

     include/caffe/net.hpp

    #ifndef CAFFE_NET_HPP_
    #define CAFFE_NET_HPP_

    #include <map>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    #include "caffe/blob.hpp"
    #include "caffe/common.hpp"
    #include "caffe/layer.hpp"
    #include "caffe/proto/caffe.pb.h"

    namespace caffe {

    /**
     * @brief Connects Layer%s together into a directed acyclic graph (DAG)
     *        specified by a NetParameter.
     *
     * TODO(dox): more thorough description.
     */
    template <typename Dtype>
    class Net {
     public:
      explicit Net(const NetParameter& param);
      explicit Net(const string& param_file, Phase phase,
          const int level = 0, const vector<string>* stages = NULL);
      virtual ~Net() {}

      /// @brief Initialize a network with a NetParameter.
      void Init(const NetParameter& param);

      /// @brief Run Forward and return the result.
      const vector<Blob<Dtype>*>& Forward(Dtype* loss = NULL);
      /// @brief DEPRECATED; use Forward() instead.
      const vector<Blob<Dtype>*>& ForwardPrefilled(Dtype* loss = NULL) {
        LOG_EVERY_N(WARNING, 1000) << "DEPRECATED: ForwardPrefilled() "
            << "will be removed in a future version. Use Forward().";
        return Forward(loss);
      }

      /**
       * The From and To variants of Forward and Backward operate on the
       * (topological) ordering by which the net is specified. For general DAG
       * networks, note that (1) computing from one layer to another might entail
       * extra computation on unrelated branches, and (2) computation starting in
       * the middle may be incorrect if all of the layers of a fan-in are not
       * included.
       */
      // Variants of the forward pass:
      Dtype ForwardFromTo(int start, int end);
      Dtype ForwardFrom(int start);
      Dtype ForwardTo(int end);
      /// @brief DEPRECATED; set input blobs then use Forward() instead.
      // Deprecated form that takes the bottom blobs and returns the outputs.
      const vector<Blob<Dtype>*>& Forward(const vector<Blob<Dtype>* > & bottom,
          Dtype* loss = NULL);

      /**
       * @brief Zeroes out the diffs of all net parameters.
       *        Should be run before Backward.
       */
      void ClearParamDiffs();

      /**
       * The network backward should take no input and output, since it solely
       * computes the gradient w.r.t the parameters, and the data has already been
       * provided during the forward pass.
       */
      void Backward();
      void BackwardFromTo(int start, int end);
      void BackwardFrom(int start);
      void BackwardTo(int end);

      /**
       * @brief Reshape all layers from bottom to top.
       *
       * This is useful to propagate changes to layer sizes without running
       * a forward pass, e.g. to compute output feature size.
       */
      void Reshape();
      // One forward pass followed by one backward pass; returns the loss.
      Dtype ForwardBackward() {
        Dtype loss;
        Forward(&loss);
        Backward();
        return loss;
      }

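      // In training, one Solver iteration is essentially:
      // ClearParamDiffs(); ForwardBackward(); Update();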
      /// @brief Updates the network weights based on the diff values computed.
      void Update();  // the diffs are typically prepared by the Solver
      /**
       * @brief Shares weight data of owner blobs with shared blobs.
       *
       * Note: this is called by Net::Init, and thus should normally not be
       * called manually.
       */
      void ShareWeights();

      /**
       * @brief For an already initialized net, implicitly copies (i.e., using no
       *        additional memory) the pre-trained layers from another Net.
       */
      void ShareTrainedLayersWith(const Net* other);
      // For an already initialized net, CopyTrainedLayersFrom() copies the already
      // trained layers from another net parameter instance.
      /**
       * @brief For an already initialized net, copies the pre-trained layers from
       *        another Net.
       */
      void CopyTrainedLayersFrom(const NetParameter& param);
      void CopyTrainedLayersFrom(const string trained_filename);
      void CopyTrainedLayersFromBinaryProto(const string trained_filename);
      void CopyTrainedLayersFromHDF5(const string trained_filename);
      /// @brief Writes the net to a proto.
      void ToProto(NetParameter* param, bool write_diff = false) const;
      /// @brief Writes the net to an HDF5 file.
      void ToHDF5(const string& filename, bool write_diff = false) const;

      /// @brief returns the network name.
      inline const string& name() const { return name_; }
      /// @brief returns the layer names
      inline const vector<string>& layer_names() const { return layer_names_; }
      /// @brief returns the blob names
      inline const vector<string>& blob_names() const { return blob_names_; }
      /// @brief returns the blobs
      inline const vector<shared_ptr<Blob<Dtype> > >& blobs() const {
        return blobs_;
      }
      /// @brief returns the layers
      inline const vector<shared_ptr<Layer<Dtype> > >& layers() const {
        return layers_;
      }
      /// @brief returns the phase: TRAIN or TEST
      inline Phase phase() const { return phase_; }
      /**
       * @brief returns the bottom vecs for each layer -- usually you won't
       *        need this unless you do per-layer checks such as gradients.
       */
      inline const vector<vector<Blob<Dtype>*> >& bottom_vecs() const {
        return bottom_vecs_;
      }
      /**
       * @brief returns the top vecs for each layer -- usually you won't
       *        need this unless you do per-layer checks such as gradients.
       */
      inline const vector<vector<Blob<Dtype>*> >& top_vecs() const {
        return top_vecs_;
      }
      /// @brief returns the ids of the top blobs of layer i
      inline const vector<int> & top_ids(int i) const {
        CHECK_GE(i, 0) << "Invalid layer id";
        CHECK_LT(i, top_id_vecs_.size()) << "Invalid layer id";
        return top_id_vecs_[i];
      }
      /// @brief returns the ids of the bottom blobs of layer i
      inline const vector<int> & bottom_ids(int i) const {
        CHECK_GE(i, 0) << "Invalid layer id";
        CHECK_LT(i, bottom_id_vecs_.size()) << "Invalid layer id";
        return bottom_id_vecs_[i];
      }
      inline const vector<vector<bool> >& bottom_need_backward() const {
        return bottom_need_backward_;
      }
      inline const vector<Dtype>& blob_loss_weights() const {
        return blob_loss_weights_;
      }
      inline const vector<bool>& layer_need_backward() const {
        return layer_need_backward_;
      }
      /// @brief returns the parameters
      inline const vector<shared_ptr<Blob<Dtype> > >& params() const {
        return params_;
      }
      inline const vector<Blob<Dtype>*>& learnable_params() const {
        return learnable_params_;
      }
      /// @brief returns the learnable parameter learning rate multipliers
      inline const vector<float>& params_lr() const { return params_lr_; }
      inline const vector<bool>& has_params_lr() const { return has_params_lr_; }
      /// @brief returns the learnable parameter decay multipliers
      inline const vector<float>& params_weight_decay() const {
        return params_weight_decay_;
      }
      inline const vector<bool>& has_params_decay() const {
        return has_params_decay_;
      }
      const map<string, int>& param_names_index() const {
        return param_names_index_;
      }
      inline const vector<int>& param_owners() const { return param_owners_; }
      inline const vector<string>& param_display_names() const {
        return param_display_names_;
      }
      /// @brief Input and output blob numbers
      inline int num_inputs() const { return net_input_blobs_.size(); }
      inline int num_outputs() const { return net_output_blobs_.size(); }
      inline const vector<Blob<Dtype>*>& input_blobs() const {
        return net_input_blobs_;
      }
      inline const vector<Blob<Dtype>*>& output_blobs() const {
        return net_output_blobs_;
      }
      inline const vector<int>& input_blob_indices() const {
        return net_input_blob_indices_;
      }
      inline const vector<int>& output_blob_indices() const {
        return net_output_blob_indices_;
      }
      bool has_blob(const string& blob_name) const;  // does the net contain this blob?
      const shared_ptr<Blob<Dtype> > blob_by_name(const string& blob_name) const;  // returns it if present
      bool has_layer(const string& layer_name) const;  // does the net contain this layer?
      const shared_ptr<Layer<Dtype> > layer_by_name(const string& layer_name) const;  // returns it if present

      void set_debug_info(const bool value) { debug_info_ = value; }

      // Helpers for Init.
      /**
       * @brief Remove layers that the user specified should be excluded given the current
       *        phase, level, and stage.
       */
      static void FilterNet(const NetParameter& param,
          NetParameter* param_filtered);
      /// @brief return whether NetState state meets NetStateRule rule
      static bool StateMeetsRule(const NetState& state, const NetStateRule& rule,
          const string& layer_name);

      // Invoked at specific points during an iteration
      class Callback {
       protected:
        virtual void run(int layer) = 0;

        template <typename T>
        friend class Net;
      };
      const vector<Callback*>& before_forward() const { return before_forward_; }
      void add_before_forward(Callback* value) {
        before_forward_.push_back(value);
      }
      const vector<Callback*>& after_forward() const { return after_forward_; }
      void add_after_forward(Callback* value) {
        after_forward_.push_back(value);
      }
      const vector<Callback*>& before_backward() const { return before_backward_; }
      void add_before_backward(Callback* value) {
        before_backward_.push_back(value);
      }
      const vector<Callback*>& after_backward() const { return after_backward_; }
      void add_after_backward(Callback* value) {
        after_backward_.push_back(value);
      }

     protected:
      // Helpers for Init.
      /// @brief Append a new top blob to the net.
      void AppendTop(const NetParameter& param, const int layer_id,
                     const int top_id, set<string>* available_blobs,
                     map<string, int>* blob_name_to_idx);
      /// @brief Append a new bottom blob to the net.
      int AppendBottom(const NetParameter& param, const int layer_id,
                       const int bottom_id, set<string>* available_blobs,
                       map<string, int>* blob_name_to_idx);
      /// @brief Append a new parameter blob to the net.
      void AppendParam(const NetParameter& param, const int layer_id,
                       const int param_id);
      /// @brief Helper for displaying debug info in Forward.
      void ForwardDebugInfo(const int layer_id);
      /// @brief Helper for displaying debug info in Backward.
      void BackwardDebugInfo(const int layer_id);
      /// @brief Helper for displaying debug info in Update.
      void UpdateDebugInfo(const int param_id);

      /// @brief The network name
      string name_;
      /// @brief The phase: TRAIN or TEST
      Phase phase_;
      /// @brief Individual layers in the net
      vector<shared_ptr<Layer<Dtype> > > layers_;
      vector<string> layer_names_;  // layer names
      map<string, int> layer_names_index_;  // map from layer name to index
      vector<bool> layer_need_backward_;  // whether each layer needs backward
      /// @brief the blobs storing intermediate results between the layer.
      vector<shared_ptr<Blob<Dtype> > > blobs_;
      vector<string> blob_names_;  // blob names
      map<string, int> blob_names_index_;  // map from blob name to index
      vector<bool> blob_need_backward_;  // whether each blob needs backward
      /// bottom_vecs stores the vectors containing the input for each layer.
      /// They don't actually host the blobs (blobs_ does), so we simply store
      /// pointers.
      vector<vector<Blob<Dtype>*> > bottom_vecs_;
      vector<vector<int> > bottom_id_vecs_;
      vector<vector<bool> > bottom_need_backward_;
      /// top_vecs stores the vectors containing the output for each layer
      /// (again only pointers; the data itself is owned by blobs_)
      vector<vector<Blob<Dtype>*> > top_vecs_;
      vector<vector<int> > top_id_vecs_;
      /// Vector of weight in the loss (or objective) function of each net blob,
      /// indexed by blob_id.
      vector<Dtype> blob_loss_weights_;
      vector<vector<int> > param_id_vecs_;
      vector<int> param_owners_;
      vector<string> param_display_names_;
      vector<pair<int, int> > param_layer_indices_;
      map<string, int> param_names_index_;
      /// blob indices for the input and the output of the net
      vector<int> net_input_blob_indices_;
      vector<int> net_output_blob_indices_;
      vector<Blob<Dtype>*> net_input_blobs_;
      vector<Blob<Dtype>*> net_output_blobs_;
      /// The parameters in the network.
      vector<shared_ptr<Blob<Dtype> > > params_;
      vector<Blob<Dtype>*> learnable_params_;  // the trainable weights
      /**
       * The mapping from params_ -> learnable_params_: we have
       * learnable_param_ids_.size() == params_.size(),
       * and learnable_params_[learnable_param_ids_[i]] == params_[i].get()
       * if and only if params_[i] is an "owner"; otherwise, params_[i] is a sharer
       * and learnable_params_[learnable_param_ids_[i]] gives its owner.
       */
      vector<int> learnable_param_ids_;
      /// the learning rate multipliers for learnable_params_
      vector<float> params_lr_;
      vector<bool> has_params_lr_;
      /// the weight decay multipliers for learnable_params_
      vector<float> params_weight_decay_;
      vector<bool> has_params_decay_;
      /// The bytes of memory used by this net
      size_t memory_used_;
      /// Whether to compute and display debug info for the net.
      bool debug_info_;
      // Callbacks
      vector<Callback*> before_forward_;
      vector<Callback*> after_forward_;
      vector<Callback*> before_backward_;
      vector<Callback*> after_backward_;

      DISABLE_COPY_AND_ASSIGN(Net);  // disables copy construction and assignment
    };


    }  // namespace caffe

    #endif  // CAFFE_NET_HPP_
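
    The Callback class near the end of the public interface deserves a note: run() is protected and Net is a friend, so user code subclasses Callback and registers an instance via add_before_forward() and its siblings; the net then invokes run() for each layer during the corresponding pass. A minimal tracing sketch (the class name and message are invented for illustration):

    #include <iostream>
    #include <caffe/net.hpp>
    using namespace caffe;

    // Prints each layer's index just before its forward pass runs.
    class TraceCallback : public Net<float>::Callback {
     protected:
      virtual void run(int layer) {
        std::cout << "forward: layer #" << layer << std::endl;
      }
    };

    int main()
    {
        Net<float> nn("./bambootry/deploy.prototxt", caffe::TEST);
        TraceCallback trace;
        nn.add_before_forward(&trace);  // fires once per layer inside Forward()
        nn.Forward();
        return 0;
    }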

    To be continued……

    The content is based on Zhao Yongke's book 《深度学习 21天实战caffe》 (Deep Learning: 21 Days of Hands-On Caffe).

    Original post: https://www.cnblogs.com/xiangfeidemengzhu/p/7100440.html