Caffe — include/caffe/layers/lrn_layer.hpp (Local Response Normalization layer declaration)
1 #ifndef CAFFE_LRN_LAYER_HPP_
2 #define CAFFE_LRN_LAYER_HPP_
3 
4 #include <vector>
5 
6 #include "caffe/blob.hpp"
7 #include "caffe/layer.hpp"
8 #include "caffe/proto/caffe.pb.h"
9 
10 #include "caffe/layers/eltwise_layer.hpp"
11 #include "caffe/layers/pooling_layer.hpp"
12 #include "caffe/layers/power_layer.hpp"
13 #include "caffe/layers/split_layer.hpp"
14 
15 namespace caffe {
16 
22 template <typename Dtype>
23 class LRNLayer : public Layer<Dtype> {
24  public:
25  explicit LRNLayer(const LayerParameter& param)
26  : Layer<Dtype>(param) {}
27  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
28  const vector<Blob<Dtype>*>& top);
29  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
30  const vector<Blob<Dtype>*>& top);
31 
32  virtual inline const char* type() const { return "LRN"; }
33  virtual inline int ExactNumBottomBlobs() const { return 1; }
34  virtual inline int ExactNumTopBlobs() const { return 1; }
35 
36  protected:
37  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
38  const vector<Blob<Dtype>*>& top);
39  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
40  const vector<Blob<Dtype>*>& top);
41  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
42  const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
43  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
44  const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
45 
46  virtual void CrossChannelForward_cpu(const vector<Blob<Dtype>*>& bottom,
47  const vector<Blob<Dtype>*>& top);
48  virtual void CrossChannelForward_gpu(const vector<Blob<Dtype>*>& bottom,
49  const vector<Blob<Dtype>*>& top);
50  virtual void WithinChannelForward(const vector<Blob<Dtype>*>& bottom,
51  const vector<Blob<Dtype>*>& top);
52  virtual void CrossChannelBackward_cpu(const vector<Blob<Dtype>*>& top,
53  const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
54  virtual void CrossChannelBackward_gpu(const vector<Blob<Dtype>*>& top,
55  const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
56  virtual void WithinChannelBackward(const vector<Blob<Dtype>*>& top,
57  const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
58 
59  int size_;
60  int pre_pad_;
61  Dtype alpha_;
62  Dtype beta_;
63  Dtype k_;
64  int num_;
65  int channels_;
66  int height_;
67  int width_;
68 
69  // Fields used for normalization ACROSS_CHANNELS
70  // scale_ stores the intermediate summing results
71  Blob<Dtype> scale_;
72 
73  // Fields used for normalization WITHIN_CHANNEL
74  shared_ptr<SplitLayer<Dtype> > split_layer_;
75  vector<Blob<Dtype>*> split_top_vec_;
76  shared_ptr<PowerLayer<Dtype> > square_layer_;
77  Blob<Dtype> square_input_;
78  Blob<Dtype> square_output_;
79  vector<Blob<Dtype>*> square_bottom_vec_;
80  vector<Blob<Dtype>*> square_top_vec_;
81  shared_ptr<PoolingLayer<Dtype> > pool_layer_;
82  Blob<Dtype> pool_output_;
83  vector<Blob<Dtype>*> pool_top_vec_;
84  shared_ptr<PowerLayer<Dtype> > power_layer_;
85  Blob<Dtype> power_output_;
86  vector<Blob<Dtype>*> power_top_vec_;
87  shared_ptr<EltwiseLayer<Dtype> > product_layer_;
88  Blob<Dtype> product_input_;
89  vector<Blob<Dtype>*> product_bottom_vec_;
90 };
91 
92 } // namespace caffe
93 
94 #endif // CAFFE_LRN_LAYER_HPP_
An interface for the units of computation which can be composed into a Net.
Definition: layer.hpp:33
A layer factory that allows one to register layers. During runtime, registered layers can be called by passing a LayerParameter protobuffer to the CreateLayer function.
Definition: blob.hpp:14
virtual int ExactNumTopBlobs() const
Returns the exact number of top blobs required by the layer, or -1 if no exact number is required.
Definition: lrn_layer.hpp:34
virtual void Backward_gpu(const vector< Blob< Dtype > *> &top, const vector< bool > &propagate_down, const vector< Blob< Dtype > *> &bottom)
Using the GPU device, compute the gradients for any parameters and for the bottom blobs if propagate_down is true.
virtual void LayerSetUp(const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top)
Does layer-specific setup: your layer should implement this function as well as Reshape.
Definition: lrn_layer.cpp:9
virtual void Reshape(const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top)
Adjust the shapes of top blobs and internal buffers to accommodate the shapes of the bottom blobs.
Definition: lrn_layer.cpp:69
virtual void Backward_cpu(const vector< Blob< Dtype > *> &top, const vector< bool > &propagate_down, const vector< Blob< Dtype > *> &bottom)
Using the CPU device, compute the gradients for any parameters and for the bottom blobs if propagate_down is true.
Definition: lrn_layer.cpp:165
virtual int ExactNumBottomBlobs() const
Returns the exact number of bottom blobs required by the layer, or -1 if no exact number is required.
Definition: lrn_layer.hpp:33
virtual void Forward_cpu(const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top)
Using the CPU device, compute the layer output.
Definition: lrn_layer.cpp:93
virtual const char * type() const
Returns the layer type.
Definition: lrn_layer.hpp:32
virtual void Forward_gpu(const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top)
Using the GPU device, compute the layer output. Fall back to Forward_cpu() if unavailable.
Normalize the input in a local region across or within feature maps.
Definition: lrn_layer.hpp:23
A wrapper around SyncedMemory holders serving as the basic computational unit through which Layers, Nets, and Solvers interact.
Definition: blob.hpp:24