GPU和CPU的实现不一样,这里贴的是CPU版本的dropout实现。

直接看caffe里面的源码吧:先产生满足伯努利分布的随机数mask;train的时候,data逐元素乘以mask,再除以保留概率(1 - threshold_),即乘以scale_;test的时候直接把输入拷贝到输出。

scale_ = 1. / (1. - threshold_);


template <typename Dtype> void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); unsigned int* mask = rand_vec_.mutable_cpu_data(); const int count = bottom[0]->count(); if (this->phase_ == TRAIN) { // Create random numbers caffe_rng_bernoulli(count, 1. - threshold_, mask); for (int i = 0; i < count; ++i) { top_data[i] = bottom_data[i] * mask[i] * scale_; } } else { caffe_copy(bottom[0]->count(), bottom_data, top_data); } } template <typename Dtype> void DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); if (this->phase_ == TRAIN) { const unsigned int* mask = rand_vec_.cpu_data(); const int count = bottom[0]->count(); for (int i = 0; i < count; ++i) { bottom_diff[i] = top_diff[i] * mask[i] * scale_; } } else { caffe_copy(top[0]->count(), top_diff, bottom_diff); } } }

  

相关文章:

  • 2021-11-27
  • 2021-11-27
  • 2021-11-27
  • 2021-10-03
  • 2021-05-02
  • 2021-05-25
  • 2021-11-27
  • 2021-11-27
猜你喜欢
  • 2022-12-23
  • 2021-12-20
  • 2021-10-07
  • 2021-11-29
  • 2021-11-27
  • 2021-11-27
  • 2021-11-27
相关资源
相似解决方案