|
| PReLULayer (const LayerParameter &param) |
|
virtual void | LayerSetUp (const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top) |
| Does layer-specific setup: your layer should implement this function as well as Reshape. More...
|
|
virtual void | Reshape (const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top) |
| Adjust the shapes of top blobs and internal buffers to accommodate the shapes of the bottom blobs. More...
|
|
virtual const char * | type () const |
| Returns the layer type.
|
|
| NeuronLayer (const LayerParameter &param) |
|
virtual int | ExactNumBottomBlobs () const |
| Returns the exact number of bottom blobs required by the layer, or -1 if no exact number is required. More...
|
|
virtual int | ExactNumTopBlobs () const |
| Returns the exact number of top blobs required by the layer, or -1 if no exact number is required. More...
|
|
| Layer (const LayerParameter &param) |
|
void | SetUp (const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top) |
| Implements common layer setup functionality. More...
|
|
Dtype | Forward (const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top) |
| Given the bottom blobs, compute the top blobs and the loss. More...
|
|
void | Backward (const vector< Blob< Dtype > *> &top, const vector< bool > &propagate_down, const vector< Blob< Dtype > *> &bottom) |
| Given the top blob error gradients, compute the bottom blob error gradients. More...
|
|
vector< shared_ptr< Blob< Dtype > > > & | blobs () |
| Returns the vector of learnable parameter blobs.
|
|
const LayerParameter & | layer_param () const |
| Returns the layer parameter.
|
|
virtual void | ToProto (LayerParameter *param, bool write_diff=false) |
| Writes the layer parameter to a protocol buffer.
|
|
Dtype | loss (const int top_index) const |
| Returns the scalar loss associated with a top blob at a given index.
|
|
void | set_loss (const int top_index, const Dtype value) |
| Sets the loss associated with a top blob at a given index.
|
|
virtual int | MinBottomBlobs () const |
| Returns the minimum number of bottom blobs required by the layer, or -1 if no minimum number is required. More...
|
|
virtual int | MaxBottomBlobs () const |
| Returns the maximum number of bottom blobs required by the layer, or -1 if no maximum number is required. More...
|
|
virtual int | MinTopBlobs () const |
| Returns the minimum number of top blobs required by the layer, or -1 if no minimum number is required. More...
|
|
virtual int | MaxTopBlobs () const |
| Returns the maximum number of top blobs required by the layer, or -1 if no maximum number is required. More...
|
|
virtual bool | EqualNumBottomTopBlobs () const |
| Returns true if the layer requires an equal number of bottom and top blobs. More...
|
|
virtual bool | AutoTopBlobs () const |
| Return whether "anonymous" top blobs are created automatically by the layer. More...
|
|
virtual bool | AllowForceBackward (const int bottom_index) const |
| Return whether to allow force_backward for a given bottom blob index. More...
|
|
bool | param_propagate_down (const int param_id) |
| Specifies whether the layer should compute gradients w.r.t. a parameter at a particular index given by param_id. More...
|
|
void | set_param_propagate_down (const int param_id, const bool value) |
| Sets whether the layer should compute gradients w.r.t. a parameter at a particular index given by param_id.
|
|
|
virtual void | Forward_cpu (const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top) |
|
virtual void | Forward_gpu (const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top) |
| Using the GPU device, compute the layer output. Fall back to Forward_cpu() if unavailable.
|
|
virtual void | Backward_cpu (const vector< Blob< Dtype > *> &top, const vector< bool > &propagate_down, const vector< Blob< Dtype > *> &bottom) |
| Computes the error gradient w.r.t. the PReLU inputs. More...
|
|
virtual void | Backward_gpu (const vector< Blob< Dtype > *> &top, const vector< bool > &propagate_down, const vector< Blob< Dtype > *> &bottom) |
| Using the GPU device, compute the gradients for any parameters and for the bottom blobs if propagate_down is true. Fall back to Backward_cpu() if unavailable.
|
|
virtual void | CheckBlobCounts (const vector< Blob< Dtype > *> &bottom, const vector< Blob< Dtype > *> &top) |
|
void | SetLossWeights (const vector< Blob< Dtype > *> &top) |
|
template<typename Dtype>
class caffe::PReLULayer< Dtype >
Parameterized Rectified Linear Unit non-linearity. The differences from ReLULayer are 1) negative slopes are learnable through backprop and 2) negative slopes can vary across channels. The number of axes of input blob should be greater than or equal to 2. The 1st axis (0-based) is seen as channels.
template<typename Dtype >
void caffe::PReLULayer< Dtype >::LayerSetUp |
( |
const vector< Blob< Dtype > *> & |
bottom, |
|
|
const vector< Blob< Dtype > *> & |
top |
|
) |
| |
|
virtual |
Does layer-specific setup: your layer should implement this function as well as Reshape.
- Parameters
-
bottom | the preshaped input blobs, whose data fields store the input data for this layer |
top | the allocated but unshaped output blobs |
This method should do one-time layer specific setup. This includes reading and processing relevant parameters from the layer_param_
. Setting up the shapes of top blobs and internal buffers should be done in Reshape
, which will be called before the forward pass to adjust the top blob sizes.
Reimplemented from caffe::Layer< Dtype >.