Skip to content

Commit c588c64

Browse files
committed
Merge remote-tracking branch 'ups/develop' into refine/op/gru
2 parents 171a0e2 + 842fb02 commit c588c64

File tree

80 files changed

+1937
-1057
lines changed

Some content is hidden

Large commits have some of their content hidden by default. Use the search box below to find content that may be hidden.

80 files changed

+1937
-1057
lines changed

CMakeLists.txt

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -204,12 +204,11 @@ include(external/snappy) # download snappy
204204
include(external/snappystream)
205205
include(external/threadpool)
206206

207+
set(WITH_ANAKIN OFF CACHE STRING "Disable Anakin first, will add it later." FORCE)
207208
if(WITH_GPU)
208209
include(cuda)
209210
include(tensorrt)
210211
include(external/anakin)
211-
else()
212-
set(WITH_ANAKIN OFF CACHE STRING "Anakin is valid only when GPU is set." FORCE)
213212
endif()
214213

215214
include(cudnn) # set cudnn libraries, must before configure

cmake/configure.cmake

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,14 @@ if(WITH_GPU)
9797
endif()
9898
include_directories(${TENSORRT_INCLUDE_DIR})
9999
endif()
100+
if(WITH_ANAKIN)
101+
if(${CUDA_VERSION_MAJOR} VERSION_LESS 8)
102+
message(FATAL_ERROR "Anakin needs CUDA >= 8.0 to compile")
103+
endif()
104+
if(${CUDNN_MAJOR_VERSION} VERSION_LESS 7)
105+
message(FATAL_ERROR "Anakin needs CUDNN >= 7.0 to compile")
106+
endif()
107+
endif()
100108
elseif(WITH_AMD_GPU)
101109
add_definitions(-DPADDLE_WITH_HIP)
102110
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__HIP_PLATFORM_HCC__")

cmake/external/anakin.cmake

Lines changed: 45 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,22 @@ if (NOT WITH_ANAKIN)
22
return()
33
endif()
44

5-
set(ANAKIN_INSTALL_DIR "${THIRD_PARTY_PATH}/install/anakin" CACHE PATH
6-
"Anakin install path." FORCE)
7-
set(ANAKIN_INCLUDE "${ANAKIN_INSTALL_DIR}" CACHE STRING "root of Anakin header files")
8-
set(ANAKIN_LIBRARY "${ANAKIN_INSTALL_DIR}" CACHE STRING "path of Anakin library")
5+
INCLUDE(ExternalProject)
6+
set(ANAKIN_SOURCE_DIR ${THIRD_PARTY_PATH}/anakin)
7+
# the anakin install dir is only default one now
8+
set(ANAKIN_INSTALL_DIR ${THIRD_PARTY_PATH}/anakin/src/extern_anakin/output)
9+
set(ANAKIN_INCLUDE ${ANAKIN_INSTALL_DIR})
10+
set(ANAKIN_LIBRARY ${ANAKIN_INSTALL_DIR})
11+
set(ANAKIN_SHARED_LIB ${ANAKIN_LIBRARY}/libanakin.so)
12+
set(ANAKIN_SABER_LIB ${ANAKIN_LIBRARY}/libanakin_saber_common.so)
13+
14+
# TODO(luotao): ANAKIN_MODLE_URL will move to demo ci later.
15+
set(ANAKIN_MODLE_URL "http://paddle-inference-dist.bj.bcebos.com/mobilenet_v2.anakin.bin")
16+
execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_SOURCE_DIR}")
17+
execute_process(COMMAND bash -c "cd ${ANAKIN_SOURCE_DIR}; wget -q --no-check-certificate ${ANAKIN_MODLE_URL}")
18+
19+
include_directories(${ANAKIN_INCLUDE})
20+
include_directories(${ANAKIN_INCLUDE}/saber/)
921

1022
set(ANAKIN_COMPILE_EXTRA_FLAGS
1123
-Wno-error=unused-but-set-variable -Wno-unused-but-set-variable
@@ -20,36 +32,33 @@ set(ANAKIN_COMPILE_EXTRA_FLAGS
2032
-Wno-reorder
2133
-Wno-error=cpp)
2234

23-
set(ANAKIN_LIBRARY_URL "https://github.com/pangge/Anakin/releases/download/Version0.1.0/anakin.tar.gz")
24-
25-
# A helper function used in Anakin, currently, to use it, one need to recursively include
26-
# nearly all the header files.
27-
function(fetch_include_recursively root_dir)
28-
if (IS_DIRECTORY ${root_dir})
29-
include_directories(${root_dir})
30-
endif()
31-
32-
file(GLOB ALL_SUB RELATIVE ${root_dir} ${root_dir}/*)
33-
foreach(sub ${ALL_SUB})
34-
if (IS_DIRECTORY ${root_dir}/${sub})
35-
fetch_include_recursively(${root_dir}/${sub})
36-
endif()
37-
endforeach()
38-
endfunction()
39-
40-
if (NOT EXISTS "${ANAKIN_INSTALL_DIR}")
41-
# download library
42-
message(STATUS "Download Anakin library from ${ANAKIN_LIBRARY_URL}")
43-
execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_INSTALL_DIR}")
44-
execute_process(COMMAND bash -c "rm -rf ${ANAKIN_INSTALL_DIR}/*")
45-
execute_process(COMMAND bash -c "cd ${ANAKIN_INSTALL_DIR}; wget --no-check-certificate -q ${ANAKIN_LIBRARY_URL}")
46-
execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_INSTALL_DIR}")
47-
execute_process(COMMAND bash -c "cd ${ANAKIN_INSTALL_DIR}; tar xzf anakin.tar.gz")
48-
endif()
35+
ExternalProject_Add(
36+
extern_anakin
37+
${EXTERNAL_PROJECT_LOG_ARGS}
38+
# TODO(luotao): use PaddlePaddle/Anakin later
39+
GIT_REPOSITORY "https://github.com/luotao1/Anakin"
40+
GIT_TAG "3957ae9263eaa0b1986758dac60a88852afb09be"
41+
PREFIX ${ANAKIN_SOURCE_DIR}
42+
UPDATE_COMMAND ""
43+
CMAKE_ARGS -DUSE_GPU_PLACE=YES
44+
-DUSE_X86_PLACE=YES
45+
-DBUILD_WITH_UNIT_TEST=NO
46+
-DPROTOBUF_ROOT=${THIRD_PARTY_PATH}/install/protobuf
47+
-DMKLML_ROOT=${THIRD_PARTY_PATH}/install/mklml
48+
-DCUDNN_ROOT=${CUDNN_ROOT}
49+
${EXTERNAL_OPTIONAL_ARGS}
50+
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ANAKIN_INSTALL_DIR}
51+
)
4952

50-
if (WITH_ANAKIN)
51-
message(STATUS "Anakin for inference is enabled")
52-
message(STATUS "Anakin is set INCLUDE:${ANAKIN_INCLUDE} LIBRARY:${ANAKIN_LIBRARY}")
53-
fetch_include_recursively(${ANAKIN_INCLUDE})
54-
link_directories(${ANAKIN_LIBRARY})
55-
endif()
53+
message(STATUS "Anakin for inference is enabled")
54+
message(STATUS "Anakin is set INCLUDE:${ANAKIN_INCLUDE} LIBRARY:${ANAKIN_LIBRARY}")
55+
56+
add_library(anakin_shared SHARED IMPORTED GLOBAL)
57+
set_property(TARGET anakin_shared PROPERTY IMPORTED_LOCATION ${ANAKIN_SHARED_LIB})
58+
add_dependencies(anakin_shared extern_anakin protobuf mklml)
59+
60+
add_library(anakin_saber SHARED IMPORTED GLOBAL)
61+
set_property(TARGET anakin_saber PROPERTY IMPORTED_LOCATION ${ANAKIN_SABER_LIB})
62+
add_dependencies(anakin_saber extern_anakin protobuf mklml)
63+
64+
list(APPEND external_project_dependencies anakin_shared anakin_saber)

cmake/external/mkldnn.cmake

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ SET(MKLDNN_INSTALL_DIR ${THIRD_PARTY_PATH}/install/mkldnn)
2424
SET(MKLDNN_INC_DIR "${MKLDNN_INSTALL_DIR}/include" CACHE PATH "mkldnn include directory." FORCE)
2525

2626
IF(WIN32 OR APPLE)
27-
MESSAGE(WARNING
27+
MESSAGE(WARNING
2828
"Windows or Mac is not supported with MKLDNN in Paddle yet."
2929
"Force WITH_MKLDNN=OFF")
3030
SET(WITH_MKLDNN OFF CACHE STRING "Disable MKLDNN in Windows and MacOS" FORCE)
@@ -57,8 +57,10 @@ ExternalProject_Add(
5757
GIT_TAG "a29d8487a63afca3d5b8c5bbdbb473cf8ccc6e51"
5858
PREFIX ${MKLDNN_SOURCES_DIR}
5959
UPDATE_COMMAND ""
60+
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
61+
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
6062
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL_DIR}
61-
CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
63+
CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
6264
CMAKE_ARGS -DMKLROOT=${MKLML_ROOT}
6365
CMAKE_ARGS -DCMAKE_C_FLAGS=${MKLDNN_CFLAG}
6466
CMAKE_ARGS -DCMAKE_CXX_FLAGS=${MKLDNN_CXXFLAG}

cmake/inference_lib.cmake

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ if (WITH_ANAKIN AND WITH_GPU)
143143
copy(anakin_inference_lib DEPS paddle_inference_api inference_anakin_api
144144
SRCS
145145
${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/libinference_anakin_api* # compiled anakin api
146-
${PADDLE_BINARY_DIR}/third_party/install/anakin/*.tar.gz # anakin release
146+
${ANAKIN_INSTALL_DIR} # anakin release
147147
DSTS ${dst_dir}/inference/anakin ${dst_dir}/inference/anakin)
148148
list(APPEND inference_deps anakin_inference_lib)
149149
endif()

doc/fluid/api/executor.rst

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -38,11 +38,3 @@ _switch_scope
3838
.. autofunction:: paddle.fluid.executor._switch_scope
3939
:noindex:
4040

41-
.. _api_fluid_executor_fetch_var:
42-
43-
fetch_var
44-
---------
45-
46-
.. autofunction:: paddle.fluid.executor.fetch_var
47-
:noindex:
48-

doc/fluid/api/fluid.rst

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -106,22 +106,6 @@ _switch_scope
106106
.. autofunction:: paddle.fluid._switch_scope
107107
:noindex:
108108

109-
.. _api_fluid_fetch_var:
110-
111-
fetch_var
112-
---------
113-
114-
.. autofunction:: paddle.fluid.fetch_var
115-
:noindex:
116-
117-
.. _api_fluid_Go:
118-
119-
Go
120-
--
121-
122-
.. autoclass:: paddle.fluid.Go
123-
:members:
124-
:noindex:
125109

126110
.. _api_fluid_make_channel:
127111

doc/fluid/design/ir/draft.md renamed to doc/fluid/design/ir/overview.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -177,8 +177,8 @@ graph = PassRegistry::Instance().Get("op_fuse_pass").Apply(std::move(grah));
177177
auto mem_opt_pass = PassRegistry::Instance().Get("memory_optimization_pass");
178178
mem_opt_pass.SetNotOwned<int>("optimize_level", 1);
179179
mem_opt_pass->Apply(std::move(graph));
180-
graph = PassRegistry::Instance().Get("multi_device_pass").Apply(std::move(grah));
181-
graph = PassRegistry::Instance().Get("multi_device_check_pass").Apply(std::move(grah));
180+
graph = PassRegistry::Instance().Get("multi_devices_pass").Apply(std::move(grah));
181+
graph = PassRegistry::Instance().Get("multi_devices_check_pass").Apply(std::move(grah));
182182
Executor exe;
183183
exe.Run(graph);
184184

paddle/fluid/API.spec

Lines changed: 5 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ paddle.fluid.Program.create_block ArgSpec(args=['self', 'parent_idx'], varargs=N
66
paddle.fluid.Program.current_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
77
paddle.fluid.Program.get_desc ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
88
paddle.fluid.Program.global_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
9-
paddle.fluid.Program.inference_optimize ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
9+
paddle.fluid.Program.inference_optimize ArgSpec(args=['self', 'export_for_deployment'], varargs=None, keywords=None, defaults=(True,))
1010
paddle.fluid.Program.list_vars ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
1111
paddle.fluid.Program.optimized_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
1212
paddle.fluid.Program.parse_from_string ArgSpec(args=['binary_str'], varargs=None, keywords=None, defaults=None)
@@ -18,6 +18,9 @@ paddle.fluid.Operator.all_attrs ArgSpec(args=['self'], varargs=None, keywords=No
1818
paddle.fluid.Operator.attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
1919
paddle.fluid.Operator.attr_type ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
2020
paddle.fluid.Operator.block_attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
21+
paddle.fluid.Operator.block_attr_id ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
22+
paddle.fluid.Operator.blocks_attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
23+
paddle.fluid.Operator.blocks_attr_ids ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
2124
paddle.fluid.Operator.has_attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
2225
paddle.fluid.Operator.has_kernel ArgSpec(args=['self', 'op_type'], varargs=None, keywords=None, defaults=None)
2326
paddle.fluid.Operator.input ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None)
@@ -34,21 +37,10 @@ paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None,
3437
paddle.fluid.program_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
3538
paddle.fluid.get_var ArgSpec(args=['name', 'program'], varargs=None, keywords=None, defaults=(None,))
3639
paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None)
37-
paddle.fluid.Executor.as_lodtensor ArgSpec(args=['self', 'data'], varargs=None, keywords=None, defaults=None)
3840
paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
3941
paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
4042
paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
4143
paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
42-
paddle.fluid.fetch_var ArgSpec(args=['name', 'scope', 'return_numpy'], varargs=None, keywords=None, defaults=(None, True))
43-
paddle.fluid.Go.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
44-
paddle.fluid.Go.construct_go_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
45-
paddle.fluid.make_channel ArgSpec(args=['dtype', 'capacity'], varargs=None, keywords=None, defaults=(0,))
46-
paddle.fluid.channel_send ArgSpec(args=['channel', 'value', 'is_copy'], varargs=None, keywords=None, defaults=(False,))
47-
paddle.fluid.channel_recv ArgSpec(args=['channel', 'return_value'], varargs=None, keywords=None, defaults=None)
48-
paddle.fluid.channel_close ArgSpec(args=['channel'], varargs=None, keywords=None, defaults=None)
49-
paddle.fluid.Select.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
50-
paddle.fluid.Select.case ArgSpec(args=['self', 'channel_action_fn', 'channel', 'value', 'is_copy'], varargs=None, keywords=None, defaults=(False,))
51-
paddle.fluid.Select.default ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
5244
paddle.fluid.Trainer.__init__ ArgSpec(args=['self', 'train_func', 'optimizer_func', 'param_path', 'place', 'parallel', 'checkpoint_config'], varargs=None, keywords=None, defaults=(None, None, False, None))
5345
paddle.fluid.Trainer.save_params ArgSpec(args=['self', 'param_path'], varargs=None, keywords=None, defaults=None)
5446
paddle.fluid.Trainer.stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
@@ -62,20 +54,16 @@ paddle.fluid.CheckpointConfig.__init__ ArgSpec(args=['self', 'checkpoint_dir', '
6254
paddle.fluid.Inferencer.__init__ ArgSpec(args=['self', 'infer_func', 'param_path', 'place', 'parallel'], varargs=None, keywords=None, defaults=(None, False))
6355
paddle.fluid.Inferencer.infer ArgSpec(args=['self', 'inputs', 'return_numpy'], varargs=None, keywords=None, defaults=(True,))
6456
paddle.fluid.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,))
65-
paddle.fluid.DistributeTranspiler.create_splited_vars ArgSpec(args=['self', 'source_var', 'block', 'tag'], varargs=None, keywords=None, defaults=None)
6657
paddle.fluid.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
6758
paddle.fluid.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program'], varargs=None, keywords=None, defaults=None)
6859
paddle.fluid.DistributeTranspiler.get_trainer_program ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
6960
paddle.fluid.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True))
7061
paddle.fluid.InferenceTranspiler.__init__
71-
paddle.fluid.InferenceTranspiler.fuse_batch_norm ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=None)
72-
paddle.fluid.InferenceTranspiler.fuse_relu_mkldnn ArgSpec(args=['self', 'program'], varargs=None, keywords=None, defaults=None)
7362
paddle.fluid.InferenceTranspiler.transpile ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,))
7463
paddle.fluid.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0))
7564
paddle.fluid.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))
7665
paddle.fluid.DistributeTranspilerConfig.__init__
7766
paddle.fluid.ParallelExecutor.__init__ ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id'], varargs=None, keywords='kwargs', defaults=(None, None, None, None, None, 1, 0))
78-
paddle.fluid.ParallelExecutor.bcast_params ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
7967
paddle.fluid.ParallelExecutor.run ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True))
8068
paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ExecutionStrategy) -> None
8169
paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.GradientScaleStrategy, arg0: int) -> None
@@ -89,7 +77,7 @@ paddle.fluid.io.save_persistables ArgSpec(args=['executor', 'dirname', 'main_pro
8977
paddle.fluid.io.load_vars ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None))
9078
paddle.fluid.io.load_params ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None))
9179
paddle.fluid.io.load_persistables ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None))
92-
paddle.fluid.io.save_inference_model ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename'], varargs=None, keywords=None, defaults=(None, None, None))
80+
paddle.fluid.io.save_inference_model ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True))
9381
paddle.fluid.io.load_inference_model ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename'], varargs=None, keywords=None, defaults=(None, None))
9482
paddle.fluid.io.get_inference_program ArgSpec(args=['target_vars', 'main_program'], varargs=None, keywords=None, defaults=(None,))
9583
paddle.fluid.initializer.ConstantInitializer.__init__ ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False))
@@ -338,14 +326,11 @@ paddle.fluid.contrib.BeamSearchDecoder.read_array ArgSpec(args=['self', 'init',
338326
paddle.fluid.contrib.BeamSearchDecoder.update_array ArgSpec(args=['self', 'array', 'value'], varargs=None, keywords=None, defaults=None)
339327
paddle.fluid.contrib.memory_usage ArgSpec(args=['program', 'batch_size'], varargs=None, keywords=None, defaults=None)
340328
paddle.fluid.transpiler.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,))
341-
paddle.fluid.transpiler.DistributeTranspiler.create_splited_vars ArgSpec(args=['self', 'source_var', 'block', 'tag'], varargs=None, keywords=None, defaults=None)
342329
paddle.fluid.transpiler.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
343330
paddle.fluid.transpiler.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program'], varargs=None, keywords=None, defaults=None)
344331
paddle.fluid.transpiler.DistributeTranspiler.get_trainer_program ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
345332
paddle.fluid.transpiler.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True))
346333
paddle.fluid.transpiler.InferenceTranspiler.__init__
347-
paddle.fluid.transpiler.InferenceTranspiler.fuse_batch_norm ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=None)
348-
paddle.fluid.transpiler.InferenceTranspiler.fuse_relu_mkldnn ArgSpec(args=['self', 'program'], varargs=None, keywords=None, defaults=None)
349334
paddle.fluid.transpiler.InferenceTranspiler.transpile ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,))
350335
paddle.fluid.transpiler.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0))
351336
paddle.fluid.transpiler.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))

paddle/fluid/framework/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ else()
100100
endif()
101101

102102

103-
cc_library(parallel_executor SRCS parallel_executor.cc DEPS threaded_ssa_graph_executor scope_buffered_ssa_graph_executor graph graph_viz_pass multi_devices_graph_builder ssa_graph_printer ssa_graph_checker)
103+
cc_library(parallel_executor SRCS parallel_executor.cc DEPS threaded_ssa_graph_executor scope_buffered_ssa_graph_executor graph graph_viz_pass multi_devices_graph_pass multi_devices_graph_print_pass multi_devices_graph_check_pass)
104104

105105
cc_library(prune SRCS prune.cc DEPS framework_proto)
106106
cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_context)

0 commit comments

Comments
 (0)