diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index b5dad398448f75..fd8efe3c6f0a85 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -3230,6 +3230,29 @@ void OperatorWithKernel::BuildPhiKernelContext(
   }
   VLOG(4) << "Done attributes";
 
+// Clear All old attrs before add new attrs,
+// because sometimes old attrs may be misused.
+#if defined(PADDLE_WITH_MKLDNN)
+  if (phi::OneDNNContext::classof(dev_ctx)) {
+    phi::OneDNNContext* one_dnn_ctx = static_cast<phi::OneDNNContext*>(dev_ctx);
+    one_dnn_ctx->ClearDnnAttr();
+  }
+#endif
+
+  // Note(YuanRisheng): Now, we can't open code below.
+  // Because some unittest run OLD dygraph and ExtraAttr is not supported in OLD
+  // dygraph. So, here we use trick that dev_ctx is a global object. We can
+  // store ExtraAttr in static graph and when unittest run OLD dygraph, it can
+  // obtain these ExtraAttr. We can open this code when OLD dygraph is no longer
+  // used.
+  /*
+  #if defined(PADDLE_WITH_CUDA)
+  if(phi::GPUContext::classof(dev_ctx)) {
+    phi::GPUContext* gpu_dnn_ctx = static_cast<phi::GPUContext*>(dev_ctx);
+    gpu_dnn_ctx->ClearDnnAttr();
+  }
+  #endif
+  */
 // For compatible with Op with extra attrs for specific backend
 #if defined(PADDLE_WITH_MKLDNN) || defined(PADDLE_WITH_CUDA)
   auto& runtime_attrs = RuntimeAttrs();
diff --git a/paddle/phi/backends/gpu/gpu_context.cc b/paddle/phi/backends/gpu/gpu_context.cc
index 0e102911442f2e..e4dcf3908f957a 100644
--- a/paddle/phi/backends/gpu/gpu_context.cc
+++ b/paddle/phi/backends/gpu/gpu_context.cc
@@ -740,6 +740,8 @@ struct GPUContext::Impl {
     dnn_attrs_[attr_name] = attr;
   }
 
+  void ClearDnnAttr() { dnn_attrs_.clear(); }
+
   // use one flag for all handles?
   // they should be accessed consistently
   bool owned_{false};
@@ -1042,4 +1044,6 @@ void GPUContext::SetDnnAttr(const std::string& attr_name, Attribute attr) {
   return impl_->SetDnnAttr(attr_name, std::move(attr));
 }
 
+void GPUContext::ClearDnnAttr() { return impl_->ClearDnnAttr(); }
+
 }  // namespace phi
diff --git a/paddle/phi/backends/gpu/gpu_context.h b/paddle/phi/backends/gpu/gpu_context.h
index 84aba73fad1e12..0b34d95eaf0d21 100644
--- a/paddle/phi/backends/gpu/gpu_context.h
+++ b/paddle/phi/backends/gpu/gpu_context.h
@@ -172,6 +172,7 @@ class PADDLE_API GPUContext : public DeviceContext,
   bool HasDnnAttr(const std::string& attr_name) const;
   const Attribute& GetDnnAttr(const std::string& attr_name) const;
   void SetDnnAttr(const std::string& attr_name, Attribute attr);
+  void ClearDnnAttr();
 
   static const char* name() { return "GPUContext"; }
 
diff --git a/paddle/phi/backends/onednn/onednn_context.cc b/paddle/phi/backends/onednn/onednn_context.cc
index 6c69191c944573..2678506f0b9a84 100644
--- a/paddle/phi/backends/onednn/onednn_context.cc
+++ b/paddle/phi/backends/onednn/onednn_context.cc
@@ -301,6 +301,8 @@ struct OneDNNContext::Impl {
     dnn_attrs_[attr_name] = attr;
   }
 
+  void ClearDnnAttr() { dnn_attrs_.clear(); }
+
   bool HasDnnInput(const std::string& input_name) const {
     return dnn_inputs_.count(input_name) != 0UL;
   }
@@ -425,6 +427,8 @@ void OneDNNContext::SetDnnAttr(const std::string& attr_name, Attribute attr) {
   return impl_->SetDnnAttr(attr_name, std::move(attr));
 }
 
+void OneDNNContext::ClearDnnAttr() { return impl_->ClearDnnAttr(); }
+
 bool OneDNNContext::HasDnnInput(const std::string& input_name) const {
   return impl_->HasDnnInput(input_name);
 }
diff --git a/paddle/phi/backends/onednn/onednn_context.h b/paddle/phi/backends/onednn/onednn_context.h
index 9035aef5f9a328..79eaa05948c622 100644
--- a/paddle/phi/backends/onednn/onednn_context.h
+++ b/paddle/phi/backends/onednn/onednn_context.h
@@ -146,6 +146,8 @@ class OneDNNContext : public CPUContext {
   const DenseTensor* GetDnnInput(const std::string& input_name) const;
   void SetDnnInput(const std::string& input_name, const DenseTensor* input);
 
+  void ClearDnnAttr();
+
   void SetInputsName(const TensorNameMap& inputs_name);
   void SetOutputsName(const TensorNameMap& outputs_name);
 