
remove unused expected_kernel_cache_pass #17486


Merged · 1 commit, merged on May 20, 2019
6 changes: 0 additions & 6 deletions paddle/fluid/framework/details/build_strategy.cc
@@ -171,11 +171,6 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
       AppendPass("runtime_context_cache_pass");
     }
 
-    if (strategy_.cache_expected_kernel_) {
-      VLOG(10) << "Add expected_kernel_cache_pass";
-      AppendPass("expected_kernel_cache_pass");
-    }
-
     AppendMultiDevPass(strategy_);
 
     if (strategy_.fuse_all_reduce_ops_) {
@@ -371,7 +366,6 @@ USE_PASS(fuse_sgd_op_pass);
 USE_PASS(fuse_momentum_op_pass);
 USE_PASS(fuse_all_reduce_op_pass);
 USE_PASS(runtime_context_cache_pass);
-USE_PASS(expected_kernel_cache_pass);
 USE_PASS(record_skip_memory_opt_vars_pass);
 #ifdef PADDLE_WITH_MKLDNN
 USE_PASS(mkldnn_placement_pass);
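For readers unfamiliar with this builder, the hunks above remove one instance of a simple pattern: a BuildStrategy flag gates whether a named pass is appended to the pipeline. A minimal self-contained sketch of that pattern (Strategy and PassBuilder here are hypothetical stand-ins, not Paddle's real types):

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for BuildStrategy.
struct Strategy {
  bool cache_runtime_context_{false};
};

// Hypothetical stand-in for ir::PassBuilder.
class PassBuilder {
 public:
  explicit PassBuilder(const Strategy& s) {
    // Each strategy flag gates one named pass, as in
    // ParallelExecutorPassBuilder above; the deleted
    // cache_expected_kernel_ branch followed the same shape.
    if (s.cache_runtime_context_) {
      AppendPass("runtime_context_cache_pass");
    }
  }
  void AppendPass(const std::string& name) { passes_.push_back(name); }
  const std::vector<std::string>& passes() const { return passes_; }

 private:
  std::vector<std::string> passes_;
};

int main() {
  Strategy s;
  s.cache_runtime_context_ = true;
  for (const auto& p : PassBuilder(s).passes()) std::cout << p << '\n';
}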
1 change: 0 additions & 1 deletion paddle/fluid/framework/details/build_strategy.h
@@ -109,7 +109,6 @@ struct BuildStrategy {
   bool remove_unnecessary_lock_{true};
 
   bool cache_runtime_context_{false};
-  bool cache_expected_kernel_{true};
   std::unordered_set<std::string> mkldnn_enabled_op_types_;
 
   // NOTE:
1 change: 0 additions & 1 deletion paddle/fluid/framework/ir/CMakeLists.txt
@@ -72,7 +72,6 @@ pass_library(transpose_flatten_concat_fuse_pass inference)
 pass_library(identity_scale_op_clean_pass base)
 pass_library(sync_batch_norm_pass base)
 pass_library(runtime_context_cache_pass base)
-pass_library(expected_kernel_cache_pass base)
 pass_library(quant_conv2d_dequant_fuse_pass inference)
 pass_library(fillconstant_elementwisemul_fuse inference)
 pass_library(shuffle_channel_detect_pass inference)
37 changes: 0 additions & 37 deletions paddle/fluid/framework/ir/expected_kernel_cache_pass.cc

This file was deleted.

31 changes: 0 additions & 31 deletions paddle/fluid/framework/ir/expected_kernel_cache_pass.h

This file was deleted.

6 changes: 0 additions & 6 deletions paddle/fluid/framework/operator.h
@@ -71,12 +71,6 @@ constexpr char kNewGradSuffix[] = "@NEWGRAD@";
 /// this Op's execution to save the elapsed time.
 constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";
 
-/// If an Op has the attribute kEnableCacheExpectedKernel, it means that in the
-/// same name scope and on the same place, the expected kernel of this Op does
-/// not change during execution, so it can be recorded at the first iteration
-/// of this Op's execution to save the elapsed time.
-constexpr char kEnableCacheExpectedKernel[] = "@ENABLE_CACHE_EXPECTED_KERNEL@";
-
 /// If an Op has this attribute, all its kernels should calculate output
 /// variable's shape in the corresponding Compute() function. And
 /// OperatorWithKernel::RunImpl() would skip call this Op's InferShape()
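The deleted comment describes an attribute-gated caching scheme: when the marker attribute is set on an Op, an expensive per-iteration lookup is done once at the first iteration and then reused. A minimal self-contained sketch of that idea (DemoOp and kEnableCache are hypothetical, not Paddle's operator API):

#include <iostream>
#include <map>
#include <string>

// Marker attribute, analogous to kEnableCacheExpectedKernel above.
constexpr char kEnableCache[] = "@ENABLE_CACHE@";

struct DemoOp {
  std::map<std::string, bool> attrs;
  std::string cached_kernel;  // empty until the first selection

  std::string ChooseKernel() {
    std::cout << "expensive kernel selection\n";
    return "float32_gpu_kernel";
  }

  void Run() {
    // With the marker attribute set, the choice made at the first
    // iteration is reused; otherwise it is recomputed every time.
    if (!attrs[kEnableCache] || cached_kernel.empty()) {
      cached_kernel = ChooseKernel();
    }
    std::cout << "running " << cached_kernel << '\n';
  }
};

int main() {
  DemoOp op;
  op.attrs[kEnableCache] = true;
  op.Run();  // selects and caches
  op.Run();  // reuses the cached kernel
}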
1 change: 0 additions & 1 deletion paddle/fluid/inference/api/analysis_config.cc
@@ -235,7 +235,6 @@ void AnalysisConfig::Update() {
       pass_builder()->InsertPass(3, "tensorrt_subgraph_pass");
     }
     pass_builder()->DeletePass("runtime_context_cache_pass");
-    pass_builder()->DeletePass("expected_kernel_cache_pass");
   }
 
   if (use_mkldnn_) {
8 changes: 3 additions & 5 deletions paddle/fluid/inference/api/paddle_pass_builder.cc
@@ -98,9 +98,8 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
         "conv_elementwise_add_fuse_pass",    //
 #endif                                       //
         "transpose_flatten_concat_fuse_pass",
-        // following two passes should be located in the last, since they will
+        // following pass should be located in the last, since it will
         // work on all fused ops.
-        "expected_kernel_cache_pass",  //
         "runtime_context_cache_pass"
   });
 
@@ -134,9 +133,8 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
         "conv_bn_fuse_pass",             //
         "conv_eltwiseadd_bn_fuse_pass",  //
         "is_test_pass",                  //
-        // following two passes should be located in the last, since
-        // they will work on all fused ops.
-        "expected_kernel_cache_pass",  //
+        // following pass should be located in the last, since
+        // it will work on all fused ops.
         "runtime_context_cache_pass"});
 
   use_gpu_ = false;
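The comment rewritten in both hunks states the ordering constraint: a pass that annotates every op must run after all fusion passes, or it would miss ops created by fusion. A toy pipeline illustrating why (a hedged sketch; Paddle's real passes operate on an ir::Graph, not a vector of names):

#include <functional>
#include <iostream>
#include <string>
#include <vector>

using Graph = std::vector<std::string>;  // op names only
using Pass = std::function<void(Graph&)>;

int main() {
  Graph g{"conv2d", "elementwise_add", "relu"};

  Pass fuse_pass = [](Graph& g) {
    g = {"conv2d_fusion"};  // three ops collapse into one fused op
  };
  Pass runtime_context_cache_pass = [](Graph& g) {
    for (auto& op : g) op += " [cached-ctx]";  // must see the fused op
  };

  // Placing the cache pass last guarantees it annotates fused ops too;
  // run it first and "conv2d_fusion" would never be annotated.
  for (const Pass& p : {fuse_pass, runtime_context_cache_pass}) p(g);
  for (const auto& op : g) std::cout << op << '\n';  // conv2d_fusion [cached-ctx]
}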
5 changes: 1 addition & 4 deletions paddle/fluid/pybind/pybind.cc
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <mutex>  // NOLINT // for call_once
 #include <string>
 #include <unordered_map>
+#include <unordered_set>
 #include <utility>
 #include <vector>
 
@@ -1492,10 +1493,6 @@ All parameter, weight, gradient are variables in Paddle.
           "cache_runtime_context",
           [](const BuildStrategy &self) { return self.cache_runtime_context_; },
           [](BuildStrategy &self, bool b) { self.cache_runtime_context_ = b; })
-      .def_property(
-          "cache_expected_kernel",
-          [](const BuildStrategy &self) { return self.cache_expected_kernel_; },
-          [](BuildStrategy &self, bool b) { self.cache_expected_kernel_ = b; })
       .def_property(
           "mkldnn_enabled_op_types",
           [](const BuildStrategy &self) {
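For context, .def_property binds a C++ getter/setter pair to a plain Python attribute, so after this PR build_strategy.cache_expected_kernel simply no longer exists on the Python side. A minimal pybind11 sketch of the surviving property (the demo module and BuildStrategyDemo type are hypothetical; compile as a pybind11 extension):

#include <pybind11/pybind11.h>

namespace py = pybind11;

// Hypothetical stand-in for Paddle's BuildStrategy.
struct BuildStrategyDemo {
  bool cache_runtime_context_{false};
};

PYBIND11_MODULE(demo, m) {
  py::class_<BuildStrategyDemo>(m, "BuildStrategy")
      .def(py::init<>())
      // Getter/setter lambdas exposed as one Python attribute, the same
      // pattern as cache_runtime_context above; the analogous
      // cache_expected_kernel property is what this PR removes.
      .def_property(
          "cache_runtime_context",
          [](const BuildStrategyDemo &self) {
            return self.cache_runtime_context_;
          },
          [](BuildStrategyDemo &self, bool b) {
            self.cache_runtime_context_ = b;
          });
}

From Python this would be used as: s = demo.BuildStrategy(); s.cache_runtime_context = True.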