Caffe
common.hpp
#ifndef CAFFE_COMMON_HPP_
#define CAFFE_COMMON_HPP_

#include <boost/shared_ptr.hpp>
#include <gflags/gflags.h>
#include <glog/logging.h>

#include <climits>
#include <cmath>
#include <fstream>  // NOLINT(readability/streams)
#include <iostream>  // NOLINT(readability/streams)
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>  // pair
#include <vector>

#include "caffe/util/device_alternate.hpp"

// Convert macro to string
#define STRINGIFY(m) #m
#define AS_STRING(m) STRINGIFY(m)
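
// Example usage (illustrative, with a hypothetical VERSION_MAJOR macro):
// AS_STRING expands its argument before stringifying it, while STRINGIFY
// stringifies the token exactly as written:
//
//   #define VERSION_MAJOR 2
//   STRINGIFY(VERSION_MAJOR)  // yields "VERSION_MAJOR"
//   AS_STRING(VERSION_MAJOR)  // yields "2"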

// gflags 2.1 issue: namespace google was changed to gflags without warning.
// Luckily we will be able to use GFLAGS_GFLAGS_H_ to detect if it is version
// 2.1. If yes, we will add a temporary solution to redirect the namespace.
// TODO(Yangqing): Once gflags solves the problem in a more elegant way, let's
// remove the following hack.
#ifndef GFLAGS_GFLAGS_H_
namespace gflags = google;
#endif  // GFLAGS_GFLAGS_H_

// Disable the copy and assignment operator for a class.
#define DISABLE_COPY_AND_ASSIGN(classname) \
private:\
  classname(const classname&);\
  classname& operator=(const classname&)
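
// Example usage (illustrative, with a hypothetical class MyNet): the macro
// expands to private copy-constructor and assignment declarations, so it is
// conventionally placed at the very end of the class body:
//
//   class MyNet {
//    public:
//     MyNet();
//
//     DISABLE_COPY_AND_ASSIGN(MyNet);
//   };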

// Instantiate a class with float and double specifications.
#define INSTANTIATE_CLASS(classname) \
  char gInstantiationGuard##classname; \
  template class classname<float>; \
  template class classname<double>
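
// Example usage (illustrative, with a hypothetical MyLayer template): placed
// at the end of a layer's .cpp file so that the float and double
// specializations are explicitly compiled and available at link time:
//
//   INSTANTIATE_CLASS(MyLayer);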

#define INSTANTIATE_LAYER_GPU_FORWARD(classname) \
  template void classname<float>::Forward_gpu( \
      const std::vector<Blob<float>*>& bottom, \
      const std::vector<Blob<float>*>& top); \
  template void classname<double>::Forward_gpu( \
      const std::vector<Blob<double>*>& bottom, \
      const std::vector<Blob<double>*>& top);

#define INSTANTIATE_LAYER_GPU_BACKWARD(classname) \
  template void classname<float>::Backward_gpu( \
      const std::vector<Blob<float>*>& top, \
      const std::vector<bool>& propagate_down, \
      const std::vector<Blob<float>*>& bottom); \
  template void classname<double>::Backward_gpu( \
      const std::vector<Blob<double>*>& top, \
      const std::vector<bool>& propagate_down, \
      const std::vector<Blob<double>*>& bottom)

#define INSTANTIATE_LAYER_GPU_FUNCS(classname) \
  INSTANTIATE_LAYER_GPU_FORWARD(classname); \
  INSTANTIATE_LAYER_GPU_BACKWARD(classname)
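
// Example usage (illustrative, with a hypothetical MyLayer template): placed
// at the end of a layer's .cu file to instantiate both GPU passes for float
// and double in a single line:
//
//   INSTANTIATE_LAYER_GPU_FUNCS(MyLayer);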

// A simple macro to mark code that is not implemented, so that when the code
// is executed we will see a fatal log.
#define NOT_IMPLEMENTED LOG(FATAL) << "Not Implemented Yet"
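
// Example usage (illustrative, with a hypothetical MyLayer): a layer without
// a GPU backward pass can abort loudly instead of silently doing nothing:
//
//   template <typename Dtype>
//   void MyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
//       const vector<bool>& propagate_down,
//       const vector<Blob<Dtype>*>& bottom) {
//     NOT_IMPLEMENTED;
//   }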

// See PR #1236
namespace cv { class Mat; }

namespace caffe {

// We will use the boost shared_ptr instead of the new C++11 one mainly
// because CUDA does not (at least for now) work well with C++11 features.
using boost::shared_ptr;

// Common functions and classes from std that caffe often uses.
using std::fstream;
using std::ios;
using std::isnan;
using std::isinf;
using std::iterator;
using std::make_pair;
using std::map;
using std::ostringstream;
using std::pair;
using std::set;
using std::string;
using std::stringstream;
using std::vector;

// A global initialization function that you should call in your main function.
// Currently it initializes google flags and google logging.
void GlobalInit(int* pargc, char*** pargv);
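
// Example usage (illustrative): call GlobalInit first thing in main so that
// gflags and glog are initialized before any other Caffe call:
//
//   int main(int argc, char** argv) {
//     caffe::GlobalInit(&argc, &argv);
//     // ... set mode, load nets, etc.
//     return 0;
//   }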

// A singleton class to hold common caffe stuff, such as the handler that
// caffe is going to use for cublas, curand, etc.
class Caffe {
 public:
  ~Caffe();

  // Thread local context for Caffe. Moved to common.cpp instead of
  // including boost/thread.hpp to avoid boost/NVCC issues (#1009, #1010)
  // on OSX. Also fails on Linux with CUDA 7.0.18.
  static Caffe& Get();

  enum Brew { CPU, GPU };

  // This random number generator facade hides boost and CUDA rng
  // implementation from one another (for cross-platform compatibility).
  class RNG {
   public:
    RNG();
    explicit RNG(unsigned int seed);
    explicit RNG(const RNG&);
    RNG& operator=(const RNG&);
    void* generator();
   private:
    class Generator;
    shared_ptr<Generator> generator_;
  };

  // Getters for boost rng, curand, and cublas handles
  inline static RNG& rng_stream() {
    if (!Get().random_generator_) {
      Get().random_generator_.reset(new RNG());
    }
    return *(Get().random_generator_);
  }
#ifndef CPU_ONLY
  inline static cublasHandle_t cublas_handle() { return Get().cublas_handle_; }
  inline static curandGenerator_t curand_generator() {
    return Get().curand_generator_;
  }
#endif

  // Returns the mode: running on CPU or GPU.
  inline static Brew mode() { return Get().mode_; }
  // The setters for the variables.
  // Sets the mode. It is recommended that you don't change the mode halfway
  // into the program, since that may cause pinned memory allocated earlier
  // to be freed in a non-pinned way, which may cause problems - I haven't
  // verified it personally, but better to note it here in the header file.
  inline static void set_mode(Brew mode) { Get().mode_ = mode; }
  // Sets the random seed of both boost and curand.
  static void set_random_seed(const unsigned int seed);
  // Sets the device. Since we have cublas and curand handles, setting the
  // device also requires us to reset those values.
  static void SetDevice(const int device_id);
  // Prints the current GPU status.
  static void DeviceQuery();
  // Checks if the specified device is available.
  static bool CheckDevice(const int device_id);
  // Searches from start_id to the highest possible device ordinal and
  // returns the ordinal of the first available device.
  static int FindDevice(const int start_id = 0);
  // Parallel training
  inline static int solver_count() { return Get().solver_count_; }
  inline static void set_solver_count(int val) { Get().solver_count_ = val; }
  inline static int solver_rank() { return Get().solver_rank_; }
  inline static void set_solver_rank(int val) { Get().solver_rank_ = val; }
  inline static bool multiprocess() { return Get().multiprocess_; }
  inline static void set_multiprocess(bool val) { Get().multiprocess_ = val; }
  inline static bool root_solver() { return Get().solver_rank_ == 0; }

 protected:
#ifndef CPU_ONLY
  cublasHandle_t cublas_handle_;
  curandGenerator_t curand_generator_;
#endif
  shared_ptr<RNG> random_generator_;

  Brew mode_;

  // Parallel training
  int solver_count_;
  int solver_rank_;
  bool multiprocess_;

 private:
  // The private constructor to avoid duplicate instantiation.
  Caffe();

  DISABLE_COPY_AND_ASSIGN(Caffe);
};
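
// Example usage (illustrative): the singleton is configured through its
// static setters; select a device and the GPU mode before constructing nets:
//
//   caffe::Caffe::SetDevice(0);
//   caffe::Caffe::set_mode(caffe::Caffe::GPU);
//   CHECK(caffe::Caffe::mode() == caffe::Caffe::GPU);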

}  // namespace caffe

#endif  // CAFFE_COMMON_HPP_