#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/math_functions.hpp"

// Forward declare boost::mutex rather than pulling in the boost headers here.
namespace boost { class mutex; }

namespace caffe {
/**
 * @brief An interface for the units of computation which can be composed into
 *        a Net.
 */
template <typename Dtype>
class Layer {
 public:
  /**
   * You should not implement your own constructor. Any set up code should go
   * to SetUp(), where the dimensions of the bottom blobs are provided to the
   * layer.
   */
  explicit Layer(const LayerParameter& param)
    : layer_param_(param) {
      // Set phase and copy blobs (if there are any).
      phase_ = param.phase();
      if (layer_param_.blobs_size() > 0) {
        blobs_.resize(layer_param_.blobs_size());
        for (int i = 0; i < layer_param_.blobs_size(); ++i) {
          blobs_[i].reset(new Blob<Dtype>());
          blobs_[i]->FromProto(layer_param_.blobs(i));
        }
      }
    }
  virtual ~Layer() {}

  /**
   * @brief Implements common layer setup functionality: checks that the
   *        numbers of bottom and top blobs are correct, calls LayerSetUp for
   *        layer-specific setup, then calls Reshape and sets up the loss
   *        weights.
   */
  void SetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    CheckBlobCounts(bottom, top);
    LayerSetUp(bottom, top);
    Reshape(bottom, top);
    SetLossWeights(top);
  }

  /**
   * @brief Does layer-specific setup: your layer should implement this
   *        function as well as Reshape.
   */
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
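  // Example (sketch): how a caller such as a Net typically drives the setup
  // sequence above. "InnerProduct" is one of Caffe's registered layer types;
  // `bottom` and `top` are assumed to be vectors of pre-created blob pointers.
  //
  //   LayerParameter param;
  //   param.set_type("InnerProduct");
  //   param.mutable_inner_product_param()->set_num_output(10);
  //   shared_ptr<Layer<float> > layer =
  //       LayerRegistry<float>::CreateLayer(param);
  //   layer->SetUp(bottom, top);  // counts checked, LayerSetUp, Reshape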
  /**
   * @brief Adjust the shapes of top blobs and internal buffers to accommodate
   *        the shapes of the bottom blobs.
   */
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) = 0;
  /**
   * @brief Given the bottom blobs, compute the top blobs and the loss.
   *        Returns the total scalar loss from the layer.
   */
  inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /**
   * @brief Given the top blob error gradients, compute the bottom blob error
   *        gradients for the blobs where propagate_down is true.
   */
  inline void Backward(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom);
  /**
   * @brief Returns the vector of learnable parameter blobs.
   */
  vector<shared_ptr<Blob<Dtype> > >& blobs() {
    return blobs_;
  }

  /**
   * @brief Returns the layer parameter.
   */
  const LayerParameter& layer_param() const { return layer_param_; }
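  // Example (sketch): counting a set-up layer's learnable parameters through
  // the accessor above; `layer` is an assumed shared_ptr<Layer<float> >.
  //
  //   int total = 0;
  //   for (int i = 0; i < layer->blobs().size(); ++i) {
  //     total += layer->blobs()[i]->count();
  //   }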
  /**
   * @brief Writes the layer parameter to a protocol buffer.
   */
  virtual void ToProto(LayerParameter* param, bool write_diff = false);
  /**
   * @brief Returns the scalar loss associated with a top blob at a given
   *        index.
   */
  inline Dtype loss(const int top_index) const {
    return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0);
  }

  /**
   * @brief Sets the loss associated with a top blob at a given index.
   */
  inline void set_loss(const int top_index, const Dtype value) {
    if (loss_.size() <= top_index) {
      loss_.resize(top_index + 1, Dtype(0));
    }
    loss_[top_index] = value;
  }
  /** @brief Returns the layer type. */
  virtual inline const char* type() const { return ""; }

  // Blob count requirements, enforced by CheckBlobCounts() below; -1 means
  // "no requirement". Subclasses override these to declare their interface.
  /** @brief Exact number of bottom blobs required, or -1. */
  virtual inline int ExactNumBottomBlobs() const { return -1; }
  /** @brief Minimum number of bottom blobs required, or -1. */
  virtual inline int MinBottomBlobs() const { return -1; }
  /** @brief Maximum number of bottom blobs required, or -1. */
  virtual inline int MaxBottomBlobs() const { return -1; }
  /** @brief Exact number of top blobs required, or -1. */
  virtual inline int ExactNumTopBlobs() const { return -1; }
  /** @brief Minimum number of top blobs required, or -1. */
  virtual inline int MinTopBlobs() const { return -1; }
  /** @brief Maximum number of top blobs required, or -1. */
  virtual inline int MaxTopBlobs() const { return -1; }
  /** @brief True if the layer requires an equal number of bottom and top
      blobs. */
  virtual inline bool EqualNumBottomTopBlobs() const { return false; }
  /** @brief Whether "anonymous" top blobs are created automatically. */
  virtual inline bool AutoTopBlobs() const { return false; }
  /** @brief Whether to allow force_backward for a given bottom blob index. */
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return true;
  }
  /**
   * @brief Specifies whether the layer should compute gradients w.r.t. a
   *        parameter at a particular index given by param_id.
   */
  inline bool param_propagate_down(const int param_id) {
    return (param_propagate_down_.size() > param_id) ?
        param_propagate_down_[param_id] : false;
  }
  /**
   * @brief Sets whether the layer should compute gradients w.r.t. a
   *        parameter at a particular index given by param_id.
   */
  inline void set_param_propagate_down(const int param_id, const bool value) {
    if (param_propagate_down_.size() <= param_id) {
      param_propagate_down_.resize(param_id + 1, true);
    }
    param_propagate_down_[param_id] = value;
  }
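  // Example (sketch): freezing a layer's first parameter blob so that
  // Backward skips its gradient; `layer` is an assumed, already set-up layer
  // with at least one parameter blob.
  //
  //   layer->set_param_propagate_down(0, false);
  //   CHECK(!layer->param_propagate_down(0));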
 protected:
  /** The protobuf that stores the layer parameters */
  LayerParameter layer_param_;
  /** The phase: TRAIN or TEST */
  Phase phase_;
  /** The vector that stores the learnable parameters as a set of blobs. */
  vector<shared_ptr<Blob<Dtype> > > blobs_;
  /** Vector indicating whether to compute the diff of each param blob. */
  vector<bool> param_propagate_down_;
  /** The vector that indicates whether each top blob has a non-zero weight in
   *  the objective function. */
  vector<Dtype> loss_;
  /** @brief Using the CPU device, compute the layer output. */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) = 0;
  /**
   * @brief Using the GPU device, compute the layer output.
   *        Fall back to Forward_cpu() if unavailable.
   */
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    return Forward_cpu(bottom, top);
  }
  /**
   * @brief Using the CPU device, compute the gradients for any parameters and
   *        for the bottom blobs if propagate_down is true.
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) = 0;
  /**
   * @brief Using the GPU device, compute the gradients for any parameters and
   *        for the bottom blobs if propagate_down is true.
   *        Fall back to Backward_cpu() if unavailable.
   */
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    Backward_cpu(top, propagate_down, bottom);
  }
  /**
   * @brief Called by SetUp to check that the numbers of bottom and top blobs
   *        provided as input match the expected numbers specified by the
   *        {ExactNum,Min,Max}{Bottom,Top}Blobs() functions.
   */
  virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
                               const vector<Blob<Dtype>*>& top) {
    if (ExactNumBottomBlobs() >= 0) {
      CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
          << type() << " Layer takes " << ExactNumBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (MinBottomBlobs() >= 0) {
      CHECK_LE(MinBottomBlobs(), bottom.size())
          << type() << " Layer takes at least " << MinBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (MaxBottomBlobs() >= 0) {
      CHECK_GE(MaxBottomBlobs(), bottom.size())
          << type() << " Layer takes at most " << MaxBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (ExactNumTopBlobs() >= 0) {
      CHECK_EQ(ExactNumTopBlobs(), top.size())
          << type() << " Layer produces " << ExactNumTopBlobs()
          << " top blob(s) as output.";
    }
    if (MinTopBlobs() >= 0) {
      CHECK_LE(MinTopBlobs(), top.size())
          << type() << " Layer produces at least " << MinTopBlobs()
          << " top blob(s) as output.";
    }
    if (MaxTopBlobs() >= 0) {
      CHECK_GE(MaxTopBlobs(), top.size())
          << type() << " Layer produces at most " << MaxTopBlobs()
          << " top blob(s) as output.";
    }
    if (EqualNumBottomTopBlobs()) {
      CHECK_EQ(bottom.size(), top.size())
          << type() << " Layer produces one top blob as output for each "
          << "bottom blob input.";
    }
  }
  /**
   * Called by SetUp to initialize the weights associated with any top blobs
   * in the loss function. Stores non-zero loss weights in the diff blob.
   */
  inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
    const int num_loss_weights = layer_param_.loss_weight_size();
    if (num_loss_weights) {
      CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
          "unspecified or specified once per top blob.";
      for (int top_id = 0; top_id < top.size(); ++top_id) {
        const Dtype loss_weight = layer_param_.loss_weight(top_id);
        if (loss_weight == Dtype(0)) { continue; }
        this->set_loss(top_id, loss_weight);
        const int count = top[top_id]->count();
        Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
        caffe_set(count, loss_weight, loss_multiplier);
      }
    }
  }
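  // Note the trick above: filling the top blob's diff with loss_weight lets
  // Forward() recover the weighted loss as a dot product, and the same diff
  // seeds the initial top gradient for Backward(). E.g. for a one-element
  // top blob holding 2.5 with loss_weight 0.1:
  //
  //   loss = caffe_cpu_dot(1, data, diff) = 2.5 * 0.1 = 0.25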
 private:
  DISABLE_COPY_AND_ASSIGN(Layer);
};  // class Layer
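// Example (sketch): the minimal surface a concrete layer implements under the
// interface above. "MyIdentityLayer" is a hypothetical layer that copies its
// single bottom blob to its single top blob.
//
//   template <typename Dtype>
//   class MyIdentityLayer : public Layer<Dtype> {
//    public:
//     explicit MyIdentityLayer(const LayerParameter& param)
//         : Layer<Dtype>(param) {}
//     virtual const char* type() const { return "MyIdentity"; }
//     virtual int ExactNumBottomBlobs() const { return 1; }
//     virtual int ExactNumTopBlobs() const { return 1; }
//     virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
//         const vector<Blob<Dtype>*>& top) {
//       top[0]->ReshapeLike(*bottom[0]);
//     }
//    protected:
//     virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
//         const vector<Blob<Dtype>*>& top) {
//       caffe_copy(bottom[0]->count(), bottom[0]->cpu_data(),
//                  top[0]->mutable_cpu_data());
//     }
//     virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
//         const vector<bool>& propagate_down,
//         const vector<Blob<Dtype>*>& bottom) {
//       if (propagate_down[0]) {
//         caffe_copy(top[0]->count(), top[0]->cpu_diff(),
//                    bottom[0]->mutable_cpu_diff());
//       }
//     }
//   };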
// Forward and backward wrappers. You should implement the cpu and
// gpu specific implementations instead, and should not change these
// functions.
template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype loss = 0;
  Reshape(bottom, top);
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Forward_cpu(bottom, top);
    // Accumulate the weighted loss from any loss-producing top blobs.
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->cpu_data();
      const Dtype* loss_weights = top[top_id]->cpu_diff();
      loss += caffe_cpu_dot(count, data, loss_weights);
    }
    break;
  case Caffe::GPU:
    Forward_gpu(bottom, top);
#ifndef CPU_ONLY
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->gpu_data();
      const Dtype* loss_weights = top[top_id]->gpu_diff();
      Dtype blob_loss = 0;
      caffe_gpu_dot(count, data, loss_weights, &blob_loss);
      loss += blob_loss;
    }
#endif
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  return loss;
}
template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Backward_cpu(top, propagate_down, bottom);
    break;
  case Caffe::GPU:
    Backward_gpu(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
// Serialize LayerParameter to protocol buffer
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
  param->CopyFrom(layer_param_);
  param->clear_blobs();
  for (int i = 0; i < blobs_.size(); ++i) {
    blobs_[i]->ToProto(param->add_blobs(), write_diff);
  }
}
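// Example (sketch): snapshotting a layer's learned parameters, e.g. while
// saving a net; pass true for write_diff to also store the gradients.
//
//   LayerParameter snapshot;
//   layer->ToProto(&snapshot, false /* write_diff */);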
}  // namespace caffe

#endif  // CAFFE_LAYER_H_