
Remove custom glog-like and gflags-like macros #896


Merged
merged 2 commits on Dec 15, 2016
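This PR makes glog and gflags hard dependencies: the WITH_GLOG / WITH_GFLAGS build options and the internal fallback implementations go away, and the P_-prefixed wrapper macros (P_DEFINE_*, P_DECLARE_*) are renamed to the standard gflags macros. As a quick orientation, here is a minimal sketch of the resulting usage (not code from this PR; the flag name num_passes is made up):

// Hypothetical sketch of standard gflags/glog usage after the macro rename.
#include <gflags/gflags.h>
#include <glog/logging.h>

// Before: P_DEFINE_int32(num_passes, 10, "...");  After: the plain gflags macro.
DEFINE_int32(num_passes, 10, "Number of training passes to run.");

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);                  // standard glog initialization
  gflags::ParseCommandLineFlags(&argc, &argv, true);   // older gflags exports this under namespace google instead
  LOG(INFO) << "num_passes = " << FLAGS_num_passes;    // DEFINE_int32 exposes FLAGS_num_passes
  return 0;
}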
20 changes: 7 additions & 13 deletions CMakeLists.txt
@@ -25,8 +25,8 @@ find_package(ZLIB REQUIRED)
find_package(NumPy REQUIRED)
find_package(Threads REQUIRED)
find_package(AVX QUIET)
find_package(Glog)
find_package(Gflags QUIET)
find_package(Glog REQUIRED)
find_package(Gflags REQUIRED)
find_package(GTest)
find_package(Sphinx)
find_package(Doxygen)
@@ -40,8 +40,6 @@ option(WITH_AVX "Compile PaddlePaddle with avx intrinsics" ${AVX_FOUND})
option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON)
option(WITH_STYLE_CHECK "Style Check for PaddlePaddle" ${PYTHONINTERP_FOUND})
option(WITH_RDMA "Compile PaddlePaddle with rdma support" OFF)
option(WITH_GLOG "Compile PaddlePaddle use glog, otherwise use a log implement internally" ${LIBGLOG_FOUND})
option(WITH_GFLAGS "Compile PaddlePaddle use gflags, otherwise use a flag implement internally" ${GFLAGS_FOUND})
option(WITH_TIMER "Compile PaddlePaddle use timer" OFF)
option(WITH_PROFILER "Compile PaddlePaddle use gpu profiler" OFF)
option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ${GTEST_FOUND})
@@ -136,16 +134,12 @@ else(WITH_RDMA)
add_definitions(-DPADDLE_DISABLE_RDMA)
endif(WITH_RDMA)

if(WITH_GLOG)
add_definitions(-DPADDLE_USE_GLOG)
include_directories(${LIBGLOG_INCLUDE_DIR})
endif()
# glog
include_directories(${LIBGLOG_INCLUDE_DIR})

if(WITH_GFLAGS)
add_definitions(-DPADDLE_USE_GFLAGS)
add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE})
include_directories(${GFLAGS_INCLUDE_DIRS})
endif()
#gflags
add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE})
include_directories(${GFLAGS_INCLUDE_DIRS})

if(WITH_TESTING)
enable_testing()
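Note that the -DGFLAGS_NS=${GFLAGS_NAMESPACE} definition is kept even though gflags is now mandatory, because different gflags releases export their symbols under either the google or the gflags namespace. A hedged illustration of how such a macro is typically consumed (hypothetical code, not taken from the Paddle sources):

// Hypothetical use of a GFLAGS_NS compile definition.
#include <gflags/gflags.h>

#ifndef GFLAGS_NS
#define GFLAGS_NS gflags  // assumption: default to the modern namespace when the build does not set it
#endif

// Resolves to google::ParseCommandLineFlags or gflags::ParseCommandLineFlags,
// whichever namespace the installed gflags was built with.
inline void ParseFlags(int* argc, char*** argv) {
  GFLAGS_NS::ParseCommandLineFlags(argc, argv, /*remove_flags=*/true);
}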
8 changes: 2 additions & 6 deletions cmake/check_packages.cmake
@@ -14,13 +14,9 @@ if(WITH_STYLE_CHECK)
find_package(PythonInterp REQUIRED)
endif()

if(WITH_GLOG)
find_package(Glog REQUIRED)
endif()
find_package(Glog REQUIRED)

if(WITH_GFLAGS)
find_package(Gflags REQUIRED)
endif()
find_package(Gflags REQUIRED)

if(WITH_TESTING)
find_package(GTest REQUIRED)
14 changes: 3 additions & 11 deletions cmake/util.cmake
@@ -65,7 +65,7 @@ endmacro()
# link_paddle_exe
# add paddle library for a paddle executable, such as trainer, pserver.
#
# It will handle WITH_PYTHON/WITH_GLOG etc.
# It will handle WITH_PYTHON etc.
function(link_paddle_exe TARGET_NAME)
if(WITH_RDMA)
generate_rdma_links()
@@ -108,6 +108,8 @@ function(link_paddle_exe TARGET_NAME)
paddle_cuda
${METRIC_LIBS}
${PROTOBUF_LIBRARY}
${LIBGLOG_LIBRARY}
${GFLAGS_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${CBLAS_LIBS}
${ZLIB_LIBRARIES}
@@ -125,16 +127,6 @@
${PYTHON_LIBRARIES})
endif()

if(WITH_GLOG)
target_link_libraries(${TARGET_NAME}
${LIBGLOG_LIBRARY})
endif()

if(WITH_GFLAGS)
target_link_libraries(${TARGET_NAME}
${GFLAGS_LIBRARIES})
endif()

if(WITH_GPU)
if(NOT WITH_DSO OR WITH_METRIC)
target_link_libraries(${TARGET_NAME}
4 changes: 1 addition & 3 deletions doc/getstarted/build_and_install/build_from_source_en.md
@@ -49,10 +49,8 @@ PaddlePaddle supports some build options. To enable it, first you need to instal
<tbody>
<tr><td class="left">WITH_GPU</td><td class="left">Compile with GPU mode.</td></tr>
<tr><td class="left">WITH_DOUBLE</td><td class="left">Compile with double precision floating-point, default: single precision.</td></tr>
<tr><td class="left">WITH_GLOG</td><td class="left">Compile with glog. If not found, default: an internal log implementation.</td></tr>
<tr><td class="left">WITH_GFLAGS</td><td class="left">Compile with gflags. If not found, default: an internal flag implementation.</td></tr>
<tr><td class="left">WITH_TESTING</td><td class="left">Compile with gtest for PaddlePaddle's unit testing.</td></tr>
<tr><td class="left">WITH_DOC</td><td class="left"> Compile to generate PaddlePaddle's docs, default: disabled (OFF).</td></tr>
<tr><td class="left">WITH_DOC</td><td class="left"> Compile to generate PaddlePaddle's docs, default: disabled (OFF).</td></tr>
<tr><td class="left">WITH_SWIG_PY</td><td class="left">Compile with python predict API, default: disabled (OFF).</td></tr>
<tr><td class="left">WITH_STYLE_CHECK</td><td class="left">Compile with code style check, default: enabled (ON).</td></tr>
</tbody>
2 changes: 0 additions & 2 deletions doc/getstarted/build_and_install/cmake/compile_options.csv
@@ -6,8 +6,6 @@ WITH_AVX,Whether to build PaddlePaddle binaries with AVX instructions,Yes
WITH_PYTHON,Whether to embed the Python interpreter (also eases future embedded porting),Yes
WITH_STYLE_CHECK,Whether to run code style checks at build time,Yes
WITH_RDMA,Whether to enable RDMA,No
WITH_GLOG,Whether to use GLOG. If disabled a simplified internal logger is used instead; this also eases future embedded porting.,Depends on whether GLOG is found
WITH_GFLAGS,Whether to use GFLAGS. If disabled a simplified internal command-line flag parser is used instead; this also eases future embedded porting.,Depends on whether GFLAGS is found
WITH_TIMER,Whether to enable the timer. Enabling it makes runs slightly slower and logs more verbose; it helps debugging and benchmarking.,No
WITH_TESTING,Whether to enable unit tests,Depends on whether GTest is found
WITH_DOC,Whether to build the Chinese and English documentation,No
2 changes: 0 additions & 2 deletions doc/getstarted/build_and_install/ubuntu_install_cn.rst
@@ -46,8 +46,6 @@ PaddlePaddle provides deb packages for Ubuntu 14.04.
with_double: OFF
with_python: ON
with_rdma: OFF
with_glog: ON
with_gflags: ON
with_metric_learning:
with_timer: OFF
with_predict_sdk:
24 changes: 10 additions & 14 deletions paddle/api/CMakeLists.txt
@@ -17,22 +17,18 @@ add_library(paddle_api STATIC
${API_SOURCES})
add_dependencies(paddle_api gen_proto_cpp)

list(LENGTH "${GFLAGS_LIBRARIES}" GFLAGS_LIBRARIES_LENGTH)

if(WITH_GFLAGS)
list(LENGTH "${GFLAGS_LIBRARIES}" GFLAGS_LIBRARIES_LENGTH)

if(${GFLAGS_LIBRARIES_LENGTH} EQUAL 0 AND TARGET "${GFLAGS_LIBRARIES}")
# Because gflags is built by CMake, it is imported as a CMake target,
# not a real library path. Get the real library path here.
message(STATUS "GFLAGS Libraries is ${GFLAGS_LIBRARIES}")
get_target_property(GFLAGS_LOCATION ${GFLAGS_LIBRARIES} LOCATION)
message(STATUS "GFLAGS Target location is ${GFLAGS_LOCATION}")
else()
set(GFLAGS_LOCATION ${GFLAGS_LIBRARIES})
endif()
if(${GFLAGS_LIBRARIES_LENGTH} EQUAL 0 AND TARGET "${GFLAGS_LIBRARIES}")
# Because gflags is built by CMake, it is imported as a CMake target,
# not a real library path. Get the real library path here.
message(STATUS "GFLAGS Libraries is ${GFLAGS_LIBRARIES}")
get_target_property(GFLAGS_LOCATION ${GFLAGS_LIBRARIES} LOCATION)
message(STATUS "GFLAGS Target location is ${GFLAGS_LOCATION}")
else()
set(GFLAGS_LOCATION ${GFLAGS_LIBRARIES})
endif()


configure_file(
paddle_api_config.py.in
${PROJ_ROOT}/paddle/api/paddle_api_config.py
@@ -57,7 +53,7 @@ add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/dist/.timestamp
paddle_trainer
paddle_api
paddle_cuda
${PY_PADDLE_PYTHON_FILES}
${PY_PADDLE_PYTHON_FILES}
)

install(DIRECTORY ${PROJ_ROOT}/paddle/dist/
6 changes: 3 additions & 3 deletions paddle/api/Trainer.cpp
@@ -27,9 +27,9 @@ limitations under the License. */

using paddle::real;

P_DECLARE_string(config);
P_DECLARE_string(init_model_path);
P_DECLARE_int32(start_pass);
DECLARE_string(config);
DECLARE_string(init_model_path);
DECLARE_int32(start_pass);

struct TrainerPrivate : public paddle::Trainer {
bool _trainOneBatch(size_t batchSize);
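The DECLARE_* forms above only reference flags that some other translation unit defines with DEFINE_*; the access pattern is unchanged apart from dropping the P_ prefix. A minimal sketch with hypothetical files (not from this PR):

// flags.cpp (hypothetical): the translation unit that owns the flag definition.
#include <gflags/gflags.h>
DEFINE_string(config, "", "Path to the trainer config file.");

// user.cpp (hypothetical): any other translation unit references the same flag.
#include <string>
#include <gflags/gflags.h>
DECLARE_string(config);  // brings an extern declaration of FLAGS_config into scope

std::string ConfigPath() { return FLAGS_config; }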
2 changes: 0 additions & 2 deletions paddle/api/paddle_api_config.py.in
@@ -8,9 +8,7 @@ CMAKE_DL_LIBS="@CMAKE_DL_LIBS@"

WITH_PYTHON="@WITH_PYTHON@"
PYTHON_LIBRARIES="@PYTHON_LIBRARIES@"
WITH_GLOG="@WITH_GLOG@"
LIBGLOG_LIBRARY="@LIBGLOG_LIBRARY@"
WITH_GFLAGS="@WITH_GFLAGS@"
GFLAGS_LIBRARIES="@GFLAGS_LIBRARIES@"
GFLAGS_LOCATION="@GFLAGS_LOCATION@"
CBLAS_LIBRARIES="@CBLAS_LIBS@"
8 changes: 2 additions & 6 deletions paddle/api/paddle_ld_flags.py
@@ -47,10 +47,8 @@ def __init__(self):
self.with_python = PaddleLDFlag.cmake_bool(WITH_PYTHON)
self.python_libs = PYTHON_LIBRARIES

self.with_glog = PaddleLDFlag.cmake_bool(WITH_GLOG)
self.glog_libs = LIBGLOG_LIBRARY

self.with_gflags = PaddleLDFlag.cmake_bool(WITH_GFLAGS)
self.with_coverage = PaddleLDFlag.cmake_bool(WITH_COVERALLS)
self.gflags_libs = GFLAGS_LIBRARIES
self.gflags_location = GFLAGS_LOCATION
@@ -88,6 +86,8 @@ def libs_str(self):
"-lpaddle_cuda",
"-lpaddle_api",
self.normalize_flag(self.protolib),
self.normalize_flag(self.glog_libs),
self.normalize_flag(self.gflags_libs),
self.normalize_flag(self.zlib),
self.normalize_flag(self.thread),
self.normalize_flag(self.dl_libs),
@@ -96,10 +96,6 @@

if self.with_python:
libs.append(self.normalize_flag(self.python_libs))
if self.with_glog:
libs.append(self.normalize_flag(self.glog_libs))
if self.with_gflags:
libs.append(self.normalize_flag(self.gflags_libs))
if self.with_gpu:
libs.append(self.normalize_flag(self.curt))
if self.with_coverage:
8 changes: 4 additions & 4 deletions paddle/cuda/src/hl_cuda_cudnn.cc
@@ -21,10 +21,10 @@ limitations under the License. */
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Logging.h"

P_DEFINE_int32(cudnn_conv_workspace_limit_in_mb,
4096,
"Specify cuDNN max workspace limit, in units MB, "
"4096MB=4GB by default.");
DEFINE_int32(cudnn_conv_workspace_limit_in_mb,
4096,
"Specify cuDNN max workspace limit, in units MB, "
"4096MB=4GB by default.");

namespace dynload {

30 changes: 15 additions & 15 deletions paddle/cuda/src/hl_dso_loader.cc
@@ -16,21 +16,21 @@ limitations under the License. */
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Logging.h"

P_DEFINE_string(cudnn_dir,
"",
"Specify path for loading libcudnn.so. For instance, "
"/usr/local/cudnn/lib. If empty [default], dlopen "
"will search cudnn from LD_LIBRARY_PATH");

P_DEFINE_string(cuda_dir,
"",
"Specify path for loading cuda library, such as libcublas, "
"libcurand. For instance, /usr/local/cuda/lib64. (Note: "
"libcudart can not be specified by cuda_dir, since some "
"build-in function in cudart already ran before main entry). "
"If default, dlopen will search cuda from LD_LIBRARY_PATH");

P_DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so.");
DEFINE_string(cudnn_dir,
"",
"Specify path for loading libcudnn.so. For instance, "
"/usr/local/cudnn/lib. If empty [default], dlopen "
"will search cudnn from LD_LIBRARY_PATH");

DEFINE_string(cuda_dir,
"",
"Specify path for loading cuda library, such as libcublas, "
"libcurand. For instance, /usr/local/cuda/lib64. (Note: "
"libcudart can not be specified by cuda_dir, since some "
"build-in function in cudart already ran before main entry). "
"If default, dlopen will search cuda from LD_LIBRARY_PATH");

DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so.");

static inline std::string join(const std::string& part1,
const std::string& part2) {
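For context, the flags above are plain path hints that are consulted before dlopen falls back to LD_LIBRARY_PATH. A simplified, hypothetical sketch of how a flag such as cudnn_dir is typically consumed; the real hl_dso_loader logic is more involved:

// Hypothetical sketch only, not the actual loader implementation.
#include <dlfcn.h>
#include <string>
#include <gflags/gflags.h>
#include <glog/logging.h>

DECLARE_string(cudnn_dir);

void* LoadCudnnHandle() {
  // An empty --cudnn_dir means: let dlopen search LD_LIBRARY_PATH.
  std::string path = FLAGS_cudnn_dir.empty() ? "libcudnn.so"
                                             : FLAGS_cudnn_dir + "/libcudnn.so";
  void* handle = dlopen(path.c_str(), RTLD_LAZY | RTLD_GLOBAL);
  if (handle == nullptr) {
    LOG(WARNING) << "Failed to load " << path << ": " << dlerror();
  }
  return handle;
}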
6 changes: 3 additions & 3 deletions paddle/gserver/dataproviders/ProtoDataProvider.cpp
@@ -22,9 +22,9 @@ limitations under the License. */
#include "DataProviderGroup.h"
#include "paddle/utils/Logging.h"

P_DEFINE_double(memory_threshold_on_load_data,
1.0,
"stop loading data when memory is not sufficient");
DEFINE_double(memory_threshold_on_load_data,
1.0,
"stop loading data when memory is not sufficient");

namespace paddle {

2 changes: 1 addition & 1 deletion paddle/gserver/evaluators/Evaluator.cpp
@@ -17,7 +17,7 @@ limitations under the License. */

#include "paddle/gserver/gradientmachines/NeuralNetwork.h"

P_DECLARE_int32(trainer_id);
DECLARE_int32(trainer_id);

namespace paddle {

8 changes: 4 additions & 4 deletions paddle/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -21,11 +21,11 @@ limitations under the License. */
#include "NeuralNetwork.h"
#include "ParallelNeuralNetwork.h"

P_DEFINE_bool(allow_only_one_model_on_one_gpu,
true,
"If true, do not allow multiple models on one GPU device");
DEFINE_bool(allow_only_one_model_on_one_gpu,
true,
"If true, do not allow multiple models on one GPU device");
#ifdef PADDLE_METRIC_LEARNING
P_DECLARE_bool(external);
DECLARE_bool(external);
#endif

namespace paddle {
@@ -24,7 +24,7 @@ limitations under the License. */
#include "paddle/utils/Stat.h"
#include "paddle/utils/Util.h"

P_DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so");
DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so");

static const char* DIY_CALC_PROB_SYMBOL_NAME = "calc_prob";
static const char* DIY_START_CALC_PROB_SYMBOL_NAME = "start_calc_prob";
2 changes: 1 addition & 1 deletion paddle/gserver/layers/Layer.cpp
@@ -33,7 +33,7 @@ limitations under the License. */
#include "TransLayer.h"
#include "ValidationLayer.h"

P_DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");
DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");

namespace paddle {

2 changes: 1 addition & 1 deletion paddle/gserver/layers/LstmLayer.cpp
@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/math/Matrix.h"
#include "paddle/utils/Stat.h"

P_DECLARE_bool(prev_batch_state);
DECLARE_bool(prev_batch_state);

namespace paddle {

2 changes: 1 addition & 1 deletion paddle/gserver/layers/RecurrentLayer.cpp
@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Stat.h"

P_DEFINE_bool(rnn_use_batch, false, "Using the batch method for calculation.");
DEFINE_bool(rnn_use_batch, false, "Using the batch method for calculation.");

namespace paddle {

2 changes: 1 addition & 1 deletion paddle/gserver/layers/ValidationLayer.h
@@ -18,7 +18,7 @@ limitations under the License. */
#include "Layer.h"
#include "paddle/gserver/evaluators/Evaluator.h"

P_DECLARE_int32(trainer_id);
DECLARE_int32(trainer_id);

namespace paddle {

2 changes: 1 addition & 1 deletion paddle/gserver/tests/LayerGradUtil.cpp
@@ -14,7 +14,7 @@ limitations under the License. */

#include "LayerGradUtil.h"

P_DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(thread_local_rand_use_global_seed);

namespace paddle {
real getCostSum(LayerPtr& testLayer, MatrixPtr weights) {
2 changes: 1 addition & 1 deletion paddle/gserver/tests/TestUtil.cpp
@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/math/SparseMatrix.h"
#include "paddle/utils/CommandLineParser.h"

P_DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length");
DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length");

namespace paddle {

4 changes: 2 additions & 2 deletions paddle/gserver/tests/test_ActivationGrad.cpp
@@ -25,8 +25,8 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT

P_DECLARE_bool(use_gpu);
P_DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(use_gpu);
DECLARE_bool(thread_local_rand_use_global_seed);

void testActivation(const string& act) {
LOG(INFO) << "test activation: " << act;
10 changes: 5 additions & 5 deletions paddle/gserver/tests/test_BatchNorm.cpp
@@ -27,11 +27,11 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT

P_DECLARE_bool(use_gpu);
P_DECLARE_int32(gpu_id);
P_DECLARE_double(checkgrad_eps);
P_DECLARE_bool(thread_local_rand_use_global_seed);
P_DECLARE_bool(prev_batch_state);
DECLARE_bool(use_gpu);
DECLARE_int32(gpu_id);
DECLARE_double(checkgrad_eps);
DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(prev_batch_state);

// Test that the batchNormLayer can be followed by a ConvLayer
TEST(Layer, batchNorm) {