  • Caffe Learning Series (17): blob

    Start with blob.h.

    First the member variables. Six protected members are defined: the forward-pass data (data_), the backward-pass gradients (diff_), the shape stored both as a vector (shape_) and as a SyncedMemory copy (shape_data_, so the shape can also be read on the GPU), the element count (count_), and the allocated capacity (capacity_).

    Then the member functions: the constructors (a default one, the legacy 4-argument num/channels/height/width form, and one taking a vector<int> shape), Reshape (which changes the blob's shape), and many inline accessors. A short usage sketch follows the header listing.

    #ifndef CAFFE_BLOB_HPP_
    #define CAFFE_BLOB_HPP_
    
    #include <algorithm>
    #include <string>
    #include <vector>
    
    #include "caffe/common.hpp"
    #include "caffe/proto/caffe.pb.h"
    #include "caffe/syncedmem.hpp"
    
    const int kMaxBlobAxes = 32;
    
    namespace caffe {
    
    /**
     * @brief A wrapper around SyncedMemory holders serving as the basic
     *        computational unit through which Layer%s, Net%s, and Solver%s
     *        interact.
     *
     * TODO(dox): more thorough description.
     */
    template <typename Dtype>
    class Blob {
     public:
      Blob()
           : data_(), diff_(), count_(0), capacity_(0) {}
    
      /// @brief Deprecated; use <code>Blob(const vector<int>& shape)</code>.
      explicit Blob(const int num, const int channels, const int height,
          const int width);  // constructor; explicit prevents implicit conversion
      explicit Blob(const vector<int>& shape);
    
      /// @brief Deprecated; use <code>Reshape(const vector<int>& shape)</code>.
      void Reshape(const int num, const int channels, const int height,
          const int width);
      /**
       * @brief Change the dimensions of the blob, allocating new memory if
       *        necessary.
       *
       * This function can be called both to create an initial allocation
       * of memory, and to adjust the dimensions of a top blob during Layer::Reshape
       * or Layer::Forward. When changing the size of blob, memory will only be
       * reallocated if sufficient memory does not already exist, and excess memory
       * will never be freed.
       *
       * Note that reshaping an input blob and immediately calling Net::Backward is
       * an error; either Net::Forward or Net::Reshape need to be called to
       * propagate the new input shape to higher layers.
       */
      void Reshape(const vector<int>& shape);
      void Reshape(const BlobShape& shape);
      void ReshapeLike(const Blob& other);
      // Return the blob shape as a human-readable string
      inline string shape_string() const {
        ostringstream stream;
        for (int i = 0; i < shape_.size(); ++i) {
          stream << shape_[i] << " ";
        }
        stream << "(" << count_ << ")";//数据个数
        return stream.str();
      }
      inline const vector<int>& shape() const { return shape_; }
      /**
       * @brief Returns the dimension of the index-th axis (or the negative index-th
       *        axis from the end, if index is negative).
       *
       * @param index the axis index, which may be negative as it will be
       *        "canonicalized" using CanonicalAxisIndex.
       *        Dies on out of range index.
       */
      inline int shape(int index) const {
        return shape_[CanonicalAxisIndex(index)];
      }
      inline int num_axes() const { return shape_.size(); }  // number of axes
    
      inline int count() const { return count_; }
    
      /**
       * @brief Compute the volume of a slice; i.e., the product of dimensions
       *        among a range of axes.
       *
       * @param start_axis The first axis to include in the slice.
       *
       * @param end_axis The first axis to exclude from the slice.
       */
      inline int count(int start_axis, int end_axis) const {
        CHECK_LE(start_axis, end_axis);  // the axis indices must form a valid range
        CHECK_GE(start_axis, 0);
        CHECK_GE(end_axis, 0);
        CHECK_LE(start_axis, num_axes());
        CHECK_LE(end_axis, num_axes());
        int count = 1;
        for (int i = start_axis; i < end_axis; ++i) {
          count *= shape(i);  // multiply the dimensions in the range
        }
        return count;
      }
      /**
       * @brief Compute the volume of a slice spanning from a particular first
       *        axis to the final axis.
       *        (i.e., the number of elements from the given axis through the last axis).
       * @param start_axis The first axis to include in the slice.
       */
      inline int count(int start_axis) const {
        return count(start_axis, num_axes());
      }
    
      /**
       * @brief Returns the 'canonical' version of a (usually) user-specified axis,
       *        allowing for negative indexing (e.g., -1 for the last axis).
       *        Negative indices are counted from the end.
       * @param axis_index the axis index.
       *        If 0 <= index < num_axes(), return index.
       *        If -num_axes <= index <= -1, return (num_axes() - (-index)),
       *        e.g., the last axis index (num_axes() - 1) if index == -1,
       *        the second to last if index == -2, etc.
       *        Dies on out of range index.
       */
      inline int CanonicalAxisIndex(int axis_index) const {
        CHECK_GE(axis_index, -num_axes())
            << "axis " << axis_index << " out of range for " << num_axes()
            << "-D Blob with shape " << shape_string();
        CHECK_LT(axis_index, num_axes())
            << "axis " << axis_index << " out of range for " << num_axes()
            << "-D Blob with shape " << shape_string();
        if (axis_index < 0) {
          return axis_index + num_axes();
        }
        return axis_index;
      }
    
      /// @brief Deprecated legacy shape accessor num: use shape(0) instead.
      inline int num() const { return LegacyShape(0); }
      /// @brief Deprecated legacy shape accessor channels: use shape(1) instead.
      inline int channels() const { return LegacyShape(1); }
      /// @brief Deprecated legacy shape accessor height: use shape(2) instead.
      inline int height() const { return LegacyShape(2); }
      /// @brief Deprecated legacy shape accessor width: use shape(3) instead.
      inline int width() const { return LegacyShape(3); }
      // Legacy shape accessor with bounds checking; missing axes are treated as 1
      inline int LegacyShape(int index) const {
        CHECK_LE(num_axes(), 4)
            << "Cannot use legacy accessors on Blobs with > 4 axes.";
        CHECK_LT(index, 4);
        CHECK_GE(index, -4);
        if (index >= num_axes() || index < -num_axes()) {
          // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse
          // indexing) -- this special case simulates the one-padding used to fill
          // extraneous axes of legacy blobs.
          return 1;
        }
        return shape(index);
      }
      // Compute the linear (row-major) offset of element (n, c, h, w)
      inline int offset(const int n, const int c = 0, const int h = 0,
          const int w = 0) const {
        CHECK_GE(n, 0);
        CHECK_LE(n, num());
        CHECK_GE(channels(), 0);
        CHECK_LE(c, channels());
        CHECK_GE(height(), 0);
        CHECK_LE(h, height());
        CHECK_GE(width(), 0);
        CHECK_LE(w, width());
        return ((n * channels() + c) * height() + h) * width() + w;
      }
      // Same as above, but indexed by a vector
      inline int offset(const vector<int>& indices) const {
        CHECK_LE(indices.size(), num_axes());
        int offset = 0;
        for (int i = 0; i < num_axes(); ++i) {
          offset *= shape(i);
          if (indices.size() > i) {
            CHECK_GE(indices[i], 0);
            CHECK_LT(indices[i], shape(i));
            offset += indices[i];
          }
        }
        return offset;
      }
      /**
       * @brief Copy from a source Blob.
       *
       * @param source the Blob to copy from
       * @param copy_diff if false, copy the data; if true, copy the diff
       * @param reshape if false, require this Blob to be pre-shaped to the shape
       *        of other (and die otherwise); if true, Reshape this Blob to other's
       *        shape if necessary
       */
      // Copy from another blob: copies data if copy_diff is false, otherwise the diff.
      // If reshape is true, this blob is reshaped to match the source.
      void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false,
          bool reshape = false);
      // Read one data element from CPU memory (used in the forward pass)
      inline Dtype data_at(const int n, const int c, const int h,
          const int w) const {
        return cpu_data()[offset(n, c, h, w)];
      }
      // Read one diff element from CPU memory (used in the backward pass)
      inline Dtype diff_at(const int n, const int c, const int h,
          const int w) const {
        return cpu_diff()[offset(n, c, h, w)];
      }
    
      inline Dtype data_at(const vector<int>& index) const {
        return cpu_data()[offset(index)];
      }
    
      inline Dtype diff_at(const vector<int>& index) const {
        return cpu_diff()[offset(index)];
      }
      // shared_ptr accessors for the underlying SyncedMemory
      inline const shared_ptr<SyncedMemory>& data() const {
        CHECK(data_);
        return data_;
      }
    
      inline const shared_ptr<SyncedMemory>& diff() const {
        CHECK(diff_);
        return diff_;
      }
      // Raw data and diff accessors
      const Dtype* cpu_data() const;
      void set_cpu_data(Dtype* data);
      const int* gpu_shape() const;
      const Dtype* gpu_data() const;
      void set_gpu_data(Dtype* data);
      const Dtype* cpu_diff() const;
      const Dtype* gpu_diff() const;
      Dtype* mutable_cpu_data();
      Dtype* mutable_gpu_data();
      Dtype* mutable_cpu_diff();
      Dtype* mutable_gpu_diff();
    
      // Apply the diff to the data: data = data - diff
      void Update();
      // Deserialize the blob from a protobuf message
      void FromProto(const BlobProto& proto, bool reshape = true);
      // Serialize the blob into a protobuf message
      void ToProto(BlobProto* proto, bool write_diff = false) const;
      
      // Sums of absolute values (L1 norms)
      /// @brief Compute the sum of absolute values (L1 norm) of the data.
      Dtype asum_data() const;
      /// @brief Compute the sum of absolute values (L1 norm) of the diff.
      Dtype asum_diff() const;
      // Sums of squares (squared L2 norms)
      /// @brief Compute the sum of squares (L2 norm squared) of the data.
      Dtype sumsq_data() const;
      /// @brief Compute the sum of squares (L2 norm squared) of the diff.
      Dtype sumsq_diff() const;
      //
      /// @brief Scale the blob data by a constant factor.
      void scale_data(Dtype scale_factor);
      /// @brief Scale the blob diff by a constant factor.
      void scale_diff(Dtype scale_factor);
    
      /**
       * @brief Set the data_ shared_ptr to point to the SyncedMemory holding the
       *        data_ of Blob other -- useful in Layer%s which simply perform a copy
       *        in their Forward pass.
       * Points this blob's data_ at the SyncedMemory of the other blob, so the
       * two blobs share the same storage; useful for layers whose Forward pass
       * is a plain copy. Note that the SyncedMemory previously held by this
       * blob is released, since assigning a shared_ptr drops the old object.
       * This deallocates the SyncedMemory holding this Blob's data_, as
       * shared_ptr calls its destructor when reset with the "=" operator.
       */
      void ShareData(const Blob& other);
      /**
       * @brief Set the diff_ shared_ptr to point to the SyncedMemory holding the
       *        diff_ of Blob other -- useful in Layer%s which simply perform a copy
       *        in their Forward pass.
       *
       * This deallocates the SyncedMemory holding this Blob's diff_, as
       * shared_ptr calls its destructor when reset with the "=" operator.
       */
      void ShareDiff(const Blob& other);
      // Check whether this blob's shape matches the one stored in a BlobProto
      bool ShapeEquals(const BlobProto& other);
    
     protected:
      shared_ptr<SyncedMemory> data_;        // data used in the forward pass
      shared_ptr<SyncedMemory> diff_;        // gradients from the backward pass
      shared_ptr<SyncedMemory> shape_data_;  // copy of the shape in SyncedMemory (readable on the GPU)
      vector<int> shape_;                    // the blob shape
      int count_;                            // number of elements
      int capacity_;                         // allocated capacity (in elements)
    
      DISABLE_COPY_AND_ASSIGN(Blob);
    };  // class Blob
    
    }  // namespace caffe
    
    #endif  // CAFFE_BLOB_HPP_
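
    A minimal usage sketch of the accessors declared above (the shape values are arbitrary):

    #include <vector>

    #include "caffe/blob.hpp"

    int main() {
      // Construct a 4-D blob: batch of 2, 3 channels, 4x5 spatial size.
      caffe::Blob<float> blob(2, 3, 4, 5);

      CHECK_EQ(blob.num_axes(), 4);
      CHECK_EQ(blob.count(), 120);     // 2 * 3 * 4 * 5
      CHECK_EQ(blob.count(1), 60);     // elements per image: 3 * 4 * 5
      CHECK_EQ(blob.shape(-1), 5);     // negative index: the last axis (width)

      // Writable CPU pointer into the data_ SyncedMemory.
      float* data = blob.mutable_cpu_data();
      data[blob.offset(1, 2, 3, 4)] = 1.0f;

      // Reshape reuses the existing allocation when it is large enough.
      std::vector<int> new_shape(2);
      new_shape[0] = 6;
      new_shape[1] = 20;
      blob.Reshape(new_shape);         // count stays 120, so no reallocation
      return 0;
    }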

    For blob.cpp, focus on the implementation of a few key functions.

    Reshape: sets shape_ and shape_data_ to the new blob shape, recomputes the element count, and allocates space for data and diff when the new count exceeds the current capacity.
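
    A quick sketch of this behavior (shapes chosen arbitrarily): memory is reallocated only when the new count exceeds capacity_, and it is never shrunk.

    #include "caffe/blob.hpp"

    void reshape_demo() {
      caffe::Blob<float> blob(1, 3, 224, 224);  // capacity_ = count_ = 150528
      const float* before = blob.cpu_data();

      // Smaller count: the existing allocation is reused, no memory is freed.
      blob.Reshape(1, 3, 100, 100);
      CHECK_EQ(before, blob.cpu_data());

      // Larger count: data_ and diff_ are re-allocated, so any pointer
      // obtained earlier (like 'before') is now stale.
      blob.Reshape(2, 3, 224, 224);
    }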

    #include <climits>
    #include <vector>
    
    #include "caffe/blob.hpp"
    #include "caffe/common.hpp"
    #include "caffe/syncedmem.hpp"
    #include "caffe/util/math_functions.hpp"
    
    namespace caffe {
    
    template <typename Dtype>  // the legacy overload forwards to the vector-based Reshape
    void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
        const int width) {
      vector<int> shape(4);
      shape[0] = num;
      shape[1] = channels;
      shape[2] = height;
      shape[3] = width;
      Reshape(shape);
    }
    
    template <typename Dtype>
    void Blob<Dtype>::Reshape(const vector<int>& shape) {
      CHECK_LE(shape.size(), kMaxBlobAxes);  // at most kMaxBlobAxes (32) axes
      count_ = 1;
      shape_.resize(shape.size());  // resize the stored shape to the new number of axes
      if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
          // shape_data_ keeps the shape in SyncedMemory so it can be read on the GPU (see gpu_shape())
          shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
      }
      int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
      for (int i = 0; i < shape.size(); ++i) {
        CHECK_GE(shape[i], 0);  // each dimension must be non-negative
        if (count_ != 0) {
          CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX";
        }
        count_ *= shape[i];  // accumulate the element count
        shape_[i] = shape[i];  // copy the shape into both shape_ and shape_data_
        shape_data[i] = shape[i];
      }
      if (count_ > capacity_) {  // reallocate only when the count exceeds the capacity
        capacity_ = count_;
        data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
        diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
      }
    }
    
    template <typename Dtype>
    void Blob<Dtype>::Reshape(const BlobShape& shape) {
      CHECK_LE(shape.dim_size(), kMaxBlobAxes);
      vector<int> shape_vec(shape.dim_size());
      for (int i = 0; i < shape.dim_size(); ++i) {
        shape_vec[i] = shape.dim(i);
      }
      Reshape(shape_vec);
    }
    
    template <typename Dtype>
    void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
      Reshape(other.shape());
    }
    
    template <typename Dtype>
    Blob<Dtype>::Blob(const int num, const int channels, const int height,
        const int width)
      // capacity_ must be initialized before calling Reshape
      : capacity_(0) {
      Reshape(num, channels, height, width);
    }
    
    template <typename Dtype>
    Blob<Dtype>::Blob(const vector<int>& shape)
      // capacity_ must be initialized before calling Reshape
      : capacity_(0) {
      Reshape(shape);
    }
    
    template <typename Dtype>
    const int* Blob<Dtype>::gpu_shape() const {
      CHECK(shape_data_);
      return (const int*)shape_data_->gpu_data();
    }
    // Get a read-only pointer to the CPU data
    template <typename Dtype>
    const Dtype* Blob<Dtype>::cpu_data() const {
      CHECK(data_);
      return (const Dtype*)data_->cpu_data();
    }
    // Point the blob at externally allocated CPU data
    template <typename Dtype>
    void Blob<Dtype>::set_cpu_data(Dtype* data) {
      CHECK(data);
      // Make sure CPU and GPU sizes remain equal
      size_t size = count_ * sizeof(Dtype);
      if (data_->size() != size) {
        data_.reset(new SyncedMemory(size));
        diff_.reset(new SyncedMemory(size));
      }
      data_->set_cpu_data(data);
    }
    
    template <typename Dtype>
    const Dtype* Blob<Dtype>::gpu_data() const {
      CHECK(data_);
      return (const Dtype*)data_->gpu_data();
    }
    
    template <typename Dtype>
    void Blob<Dtype>::set_gpu_data(Dtype* data) {
      CHECK(data);
      // Make sure CPU and GPU sizes remain equal
      size_t size = count_ * sizeof(Dtype);
      if (data_->size() != size) {
        data_.reset(new SyncedMemory(size));
        diff_.reset(new SyncedMemory(size));
      }
      data_->set_gpu_data(data);
    }
    
    template <typename Dtype>
    const Dtype* Blob<Dtype>::cpu_diff() const {
      CHECK(diff_);
      return (const Dtype*)diff_->cpu_data();
    }
    
    template <typename Dtype>
    const Dtype* Blob<Dtype>::gpu_diff() const {
      CHECK(diff_);
      return (const Dtype*)diff_->gpu_data();
    }
    
    template <typename Dtype>
    Dtype* Blob<Dtype>::mutable_cpu_data() {
      CHECK(data_);
      return static_cast<Dtype*>(data_->mutable_cpu_data());
    }
    // The "mutable_" prefix means the accessor returns a writable pointer (it is not the C++ mutable keyword)
    template <typename Dtype>
    Dtype* Blob<Dtype>::mutable_gpu_data() {
      CHECK(data_);
      return static_cast<Dtype*>(data_->mutable_gpu_data());
    }
    
    template <typename Dtype>
    Dtype* Blob<Dtype>::mutable_cpu_diff() {
      CHECK(diff_);
      return static_cast<Dtype*>(diff_->mutable_cpu_data());
    }
    
    template <typename Dtype>
    Dtype* Blob<Dtype>::mutable_gpu_diff() {
      CHECK(diff_);
      return static_cast<Dtype*>(diff_->mutable_gpu_data());
    }
    // Share data with another blob (no copy: both blobs point at the same SyncedMemory)
    template <typename Dtype>
    void Blob<Dtype>::ShareData(const Blob& other) {
      CHECK_EQ(count_, other.count());
      data_ = other.data();
    }
    
    template <typename Dtype>
    void Blob<Dtype>::ShareDiff(const Blob& other) {
      CHECK_EQ(count_, other.count());
      diff_ = other.diff();
    }
    
    // The "update" method is used for parameter blobs in a Net, which are stored
    // as Blob<float> or Blob<double> -- hence we do not define it for
    // Blob<int> or Blob<unsigned int>.
    template <> void Blob<unsigned int>::Update() { NOT_IMPLEMENTED; }
    template <> void Blob<int>::Update() { NOT_IMPLEMENTED; }
    // Update: dispatches on where the data currently lives (data_->head()) and computes data = data - 1 * diff
    template <typename Dtype>
    void Blob<Dtype>::Update() {
      // We will perform update based on where the data is located.
      switch (data_->head()) {
      case SyncedMemory::HEAD_AT_CPU:
        // perform computation on CPU
        caffe_axpy<Dtype>(count_, Dtype(-1),
            static_cast<const Dtype*>(diff_->cpu_data()),
            static_cast<Dtype*>(data_->mutable_cpu_data()));
        break;
      case SyncedMemory::HEAD_AT_GPU:
      case SyncedMemory::SYNCED:
    #ifndef CPU_ONLY
        // perform computation on GPU
        caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
            static_cast<const Dtype*>(diff_->gpu_data()),
            static_cast<Dtype*>(data_->mutable_gpu_data()));
    #else
        NO_GPU;
    #endif
        break;
      default:
        LOG(FATAL) << "Syncedmem not initialized.";
      }
    }
    
    template <> unsigned int Blob<unsigned int>::asum_data() const {
      NOT_IMPLEMENTED;
      return 0;
    }
    
    template <> int Blob<int>::asum_data() const {
      NOT_IMPLEMENTED;
      return 0;
    }
    
    template <typename Dtype>
    Dtype Blob<Dtype>::asum_data() const {
      if (!data_) { return 0; }
      switch (data_->head()) {
      case SyncedMemory::HEAD_AT_CPU:
        return caffe_cpu_asum(count_, cpu_data());
      case SyncedMemory::HEAD_AT_GPU:
      case SyncedMemory::SYNCED:
    #ifndef CPU_ONLY
      {
        Dtype asum;
        caffe_gpu_asum(count_, gpu_data(), &asum);
        return asum;
      }
    #else
        NO_GPU;
    #endif
      case SyncedMemory::UNINITIALIZED:
        return 0;
      default:
        LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
      }
      return 0;
    }
    
    template <> unsigned int Blob<unsigned int>::asum_diff() const {
      NOT_IMPLEMENTED;
      return 0;
    }
    
    template <> int Blob<int>::asum_diff() const {
      NOT_IMPLEMENTED;
      return 0;
    }
    // L1 norm of the diff
    template <typename Dtype>
    Dtype Blob<Dtype>::asum_diff() const {
      if (!diff_) { return 0; }
      switch (diff_->head()) {
      case SyncedMemory::HEAD_AT_CPU:
        return caffe_cpu_asum(count_, cpu_diff());
      case SyncedMemory::HEAD_AT_GPU:
      case SyncedMemory::SYNCED:
    #ifndef CPU_ONLY
      {
        Dtype asum;
        caffe_gpu_asum(count_, gpu_diff(), &asum);
        return asum;
      }
    #else
        NO_GPU;
    #endif
      case SyncedMemory::UNINITIALIZED:
        return 0;
      default:
        LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
      }
      return 0;
    }
    
    template <> unsigned int Blob<unsigned int>::sumsq_data() const {
      NOT_IMPLEMENTED;
      return 0;
    }
    
    template <> int Blob<int>::sumsq_data() const {
      NOT_IMPLEMENTED;
      return 0;
    }
    // Sum of squares (squared L2 norm) of the data
    template <typename Dtype>
    Dtype Blob<Dtype>::sumsq_data() const {
      Dtype sumsq;
      const Dtype* data;
      if (!data_) { return 0; }
      switch (data_->head()) {
      case SyncedMemory::HEAD_AT_CPU:
        data = cpu_data();
        sumsq = caffe_cpu_dot(count_, data, data);
        break;
      case SyncedMemory::HEAD_AT_GPU:
      case SyncedMemory::SYNCED:
    #ifndef CPU_ONLY
        data = gpu_data();
        caffe_gpu_dot(count_, data, data, &sumsq);
    #else
        NO_GPU;
    #endif
        break;
      case SyncedMemory::UNINITIALIZED:
        return 0;
      default:
        LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
      }
      return sumsq;
    }
    
    template <> unsigned int Blob<unsigned int>::sumsq_diff() const {
      NOT_IMPLEMENTED;
      return 0;
    }
    
    template <> int Blob<int>::sumsq_diff() const {
      NOT_IMPLEMENTED;
      return 0;
    }
    
    template <typename Dtype>
    Dtype Blob<Dtype>::sumsq_diff() const {
      Dtype sumsq;
      const Dtype* diff;
      if (!diff_) { return 0; }
      switch (diff_->head()) {
      case SyncedMemory::HEAD_AT_CPU:
        diff = cpu_diff();
        sumsq = caffe_cpu_dot(count_, diff, diff);
        break;
      case SyncedMemory::HEAD_AT_GPU:
      case SyncedMemory::SYNCED:
    #ifndef CPU_ONLY
        diff = gpu_diff();
        caffe_gpu_dot(count_, diff, diff, &sumsq);
        break;
    #else
        NO_GPU;
    #endif
      case SyncedMemory::UNINITIALIZED:
        return 0;
      default:
        LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
      }
      return sumsq;
    }
    
    template <> void Blob<unsigned int>::scale_data(unsigned int scale_factor) {
      NOT_IMPLEMENTED;
    }
    
    template <> void Blob<int>::scale_data(int scale_factor) {
      NOT_IMPLEMENTED;
    }
    // Multiply the data by a constant factor
    template <typename Dtype>
    void Blob<Dtype>::scale_data(Dtype scale_factor) {
      Dtype* data;
      if (!data_) { return; }
      switch (data_->head()) {
      case SyncedMemory::HEAD_AT_CPU:
        data = mutable_cpu_data();
        caffe_scal(count_, scale_factor, data);
        return;
      case SyncedMemory::HEAD_AT_GPU:
      case SyncedMemory::SYNCED:
    #ifndef CPU_ONLY
        data = mutable_gpu_data();
        caffe_gpu_scal(count_, scale_factor, data);
        return;
    #else
        NO_GPU;
    #endif
      case SyncedMemory::UNINITIALIZED:
        return;
      default:
        LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
      }
    }
    
    template <> void Blob<unsigned int>::scale_diff(unsigned int scale_factor) {
      NOT_IMPLEMENTED;
    }
    
    template <> void Blob<int>::scale_diff(int scale_factor) {
      NOT_IMPLEMENTED;
    }
    
    template <typename Dtype>
    void Blob<Dtype>::scale_diff(Dtype scale_factor) {
      Dtype* diff;
      if (!diff_) { return; }
      switch (diff_->head()) {
      case SyncedMemory::HEAD_AT_CPU:
        diff = mutable_cpu_diff();
        caffe_scal(count_, scale_factor, diff);
        return;
      case SyncedMemory::HEAD_AT_GPU:
      case SyncedMemory::SYNCED:
    #ifndef CPU_ONLY
        diff = mutable_gpu_diff();
        caffe_gpu_scal(count_, scale_factor, diff);
        return;
    #else
        NO_GPU;
    #endif
      case SyncedMemory::UNINITIALIZED:
        return;
      default:
        LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
      }
    }
    // Does this blob's shape match the shape stored in a BlobProto?
    template <typename Dtype>
    bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
      if (other.has_num() || other.has_channels() ||
          other.has_height() || other.has_width()) {  // a legacy proto sets num/channels/height/width instead of shape
        // Using deprecated 4D Blob dimensions --
        // shape is (num, channels, height, width).
        // Note: we do not use the normal Blob::num(), Blob::channels(), etc.
        // methods as these index from the beginning of the blob shape, where legacy
        // parameter blobs were indexed from the end of the blob shape (e.g., bias
        // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)).
        return shape_.size() <= 4 &&
               LegacyShape(-4) == other.num() &&
               LegacyShape(-3) == other.channels() &&
               LegacyShape(-2) == other.height() &&
               LegacyShape(-1) == other.width();
      }
      // otherwise compare against the proto's shape field
      vector<int> other_shape(other.shape().dim_size());
      for (int i = 0; i < other.shape().dim_size(); ++i) {
        other_shape[i] = other.shape().dim(i);
      }
      return shape_ == other_shape;
    }
    // Copy data or diff from another blob
    template <typename Dtype>
    void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
      if (source.count() != count_ || source.shape() != shape_) {
        if (reshape) {
          ReshapeLike(source);
        } else {
          LOG(FATAL) << "Trying to copy blobs of different sizes.";
        }
      }
      switch (Caffe::mode()) {
      case Caffe::GPU:
        if (copy_diff) {
          caffe_copy(count_, source.gpu_diff(),
              static_cast<Dtype*>(diff_->mutable_gpu_data()));
        } else {
          caffe_copy(count_, source.gpu_data(),
              static_cast<Dtype*>(data_->mutable_gpu_data()));
        }
        break;
      case Caffe::CPU:
        if (copy_diff) {
          caffe_copy(count_, source.cpu_diff(),
              static_cast<Dtype*>(diff_->mutable_cpu_data()));
        } else {
          caffe_copy(count_, source.cpu_data(),
              static_cast<Dtype*>(data_->mutable_cpu_data()));
        }
        break;
      default:
        LOG(FATAL) << "Unknown caffe mode.";
      }
    }
    // Deserialize a blob from a BlobProto, optionally reshaping to match
    template <typename Dtype>
    void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
      if (reshape) {
        vector<int> shape;
        if (proto.has_num() || proto.has_channels() ||  // legacy 4-D proto: build the shape from num/channels/height/width
            proto.has_height() || proto.has_width()) {
          // Using deprecated 4D Blob dimensions --
          // shape is (num, channels, height, width).
          shape.resize(4);
          shape[0] = proto.num();
          shape[1] = proto.channels();
          shape[2] = proto.height();
          shape[3] = proto.width();
        } else { 
          shape.resize(proto.shape().dim_size());
          for (int i = 0; i < proto.shape().dim_size(); ++i) {
            shape[i] = proto.shape().dim(i);
          }
        }
        Reshape(shape);
      } else {
        CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
      }
      // copy data (and diff, if present)
      Dtype* data_vec = mutable_cpu_data();  // writable pointer to the CPU data
      if (proto.double_data_size() > 0) {
        CHECK_EQ(count_, proto.double_data_size());
        for (int i = 0; i < count_; ++i) {
          data_vec[i] = proto.double_data(i);
        }
      } else {
        CHECK_EQ(count_, proto.data_size());
        for (int i = 0; i < count_; ++i) {
          data_vec[i] = proto.data(i);
        }
      }
      if (proto.double_diff_size() > 0) {
        CHECK_EQ(count_, proto.double_diff_size());
        Dtype* diff_vec = mutable_cpu_diff();
        for (int i = 0; i < count_; ++i) {
          diff_vec[i] = proto.double_diff(i);
        }
      } else if (proto.diff_size() > 0) {
        CHECK_EQ(count_, proto.diff_size());
        Dtype* diff_vec = mutable_cpu_diff();
        for (int i = 0; i < count_; ++i) {
          diff_vec[i] = proto.diff(i);
        }
      }
    }
    
    template <>
    void Blob<double>::ToProto(BlobProto* proto, bool write_diff) const {
      proto->clear_shape();
      for (int i = 0; i < shape_.size(); ++i) {
        proto->mutable_shape()->add_dim(shape_[i]);
      }
      proto->clear_double_data();
      proto->clear_double_diff();
      const double* data_vec = cpu_data();
      for (int i = 0; i < count_; ++i) {
        proto->add_double_data(data_vec[i]);
      }
      if (write_diff) {
        const double* diff_vec = cpu_diff();
        for (int i = 0; i < count_; ++i) {
          proto->add_double_diff(diff_vec[i]);
        }
      }
    }
    
    template <>
    void Blob<float>::ToProto(BlobProto* proto, bool write_diff) const {
      proto->clear_shape();
      for (int i = 0; i < shape_.size(); ++i) {
        proto->mutable_shape()->add_dim(shape_[i]);
      }
      proto->clear_data();
      proto->clear_diff();
      const float* data_vec = cpu_data();
      for (int i = 0; i < count_; ++i) {
        proto->add_data(data_vec[i]);
      }
      if (write_diff) {
        const float* diff_vec = cpu_diff();
        for (int i = 0; i < count_; ++i) {
          proto->add_diff(diff_vec[i]);
        }
      }
    }
    
    INSTANTIATE_CLASS(Blob);
    template class Blob<int>;
    template class Blob<unsigned int>;
    
    }  // namespace caffe

    A Blob has 4 dimensions: n x c x h x w.

    bottom[0], bottom[1], ... are the layer's inputs (one Blob per input).

    bottom[0]->count(): the total number of elements in that input.

    bottom[0]->num(): the number of blocks in the input; this is the batch size, i.e. how many images are fed in at once.
    c is the number of channels: in a convolution layer's output it equals the number of filters (each filter produces one output channel), while at the input layer it is simply the number of image channels.
    There is also dim, the number of elements per block: dim = count / num = c * h * w.

    Illustration:
    | xxxxx | xxxxx | xxxxx | xxxxx | xxxxx | xxxxx | xxxxx | xxxxx |

    Above, num = 8, dim = 5, count = 5 * 8 = 40.
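
    To make the numbers concrete, a quick sketch with a hypothetical blob of shape 8 x 5 x 1 x 1 (matching the figure above):

    #include "caffe/blob.hpp"

    void layout_demo() {
      caffe::Blob<float> b(8, 5, 1, 1);    // num = 8, channels = 5, height = width = 1

      CHECK_EQ(b.num(), 8);                // number of blocks (the batch size)
      CHECK_EQ(b.count() / b.num(), 5);    // dim: elements per block = c * h * w
      CHECK_EQ(b.count(), 40);             // total elements = num * dim

      // Row-major linear offset: ((n * channels + c) * height + h) * width + w
      CHECK_EQ(b.offset(3, 2, 0, 0), 17);  // ((3 * 5 + 2) * 1 + 0) * 1 + 0 = 17
    }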

    References:
    http://blog.csdn.net/qq_14975217/article/details/51524042
    http://blog.csdn.net/xizero00/article/details/50886829
    http://www.cnblogs.com/louyihang-loves-baiyan/p/5149628.html

    On common BLAS functions, see: http://www.cnblogs.com/huashiyiqike/p/3886670.html

    On protobuf, see: https://www.ibm.com/developerworks/cn/linux/l-cn-gpb/
