
[BugFix] Fix OneDNN kernels bug when using a pass #48364


Merged: 2 commits, Nov 28, 2022
23 changes: 23 additions & 0 deletions paddle/fluid/framework/operator.cc
@@ -3230,6 +3230,29 @@ void OperatorWithKernel::BuildPhiKernelContext(
}
VLOG(4) << "Done attributes";

// Clear all old attrs before adding new ones,
// because stale attrs left over from a previous kernel may otherwise be misused.
#if defined(PADDLE_WITH_MKLDNN)
if (phi::OneDNNContext::classof(dev_ctx)) {
phi::OneDNNContext* one_dnn_ctx = static_cast<phi::OneDNNContext*>(dev_ctx);
one_dnn_ctx->ClearDnnAttr();
}
#endif

// Note(YuanRisheng): We cannot enable the code below yet, because some
// unittests run the OLD dygraph mode and ExtraAttr is not supported there.
// Instead we rely on the trick that dev_ctx is a global object: ExtraAttr is
// stored in static graph mode, and when a unittest later runs the OLD dygraph
// it can still obtain these ExtraAttr. Enable this code once the OLD dygraph
// is no longer used.
/*
#if defined(PADDLE_WITH_CUDA)
if(phi::GPUContext::classof(dev_ctx)) {
phi::GPUContext* gpu_dnn_ctx = static_cast<phi::GPUContext*>(dev_ctx);
gpu_dnn_ctx->ClearDnnAttr();
}
#endif
*/
// For compatibility with Ops that carry extra attrs for a specific backend
#if defined(PADDLE_WITH_MKLDNN) || defined(PADDLE_WITH_CUDA)
auto& runtime_attrs = RuntimeAttrs();
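
Why the clearing matters: the device context is shared across kernels, so a DNN attribute set while building one kernel's context stays in the map and can be picked up by the next kernel unless it is wiped first. A minimal, self-contained sketch of that failure mode (illustrative only; `FakeDnnContext`, `RunOpA`/`RunOpB`, and the int-valued attributes are stand-ins, not Paddle code):

```cpp
// Illustrative sketch: a shared context whose attr map leaks across kernels.
#include <iostream>
#include <map>
#include <string>

struct FakeDnnContext {                        // stand-in for phi::OneDNNContext
  std::map<std::string, int> dnn_attrs_;       // real code stores phi::Attribute
  bool HasDnnAttr(const std::string& n) const { return dnn_attrs_.count(n) != 0; }
  void SetDnnAttr(const std::string& n, int v) { dnn_attrs_[n] = v; }
  void ClearDnnAttr() { dnn_attrs_.clear(); }  // what the patch calls before each op
};

void RunOpA(FakeDnnContext* ctx) { ctx->SetDnnAttr("fuse_relu", 1); }

void RunOpB(const FakeDnnContext& ctx) {
  // Without ClearDnnAttr(), OpB observes the attr that only OpA set.
  std::cout << "OpB sees fuse_relu: " << ctx.HasDnnAttr("fuse_relu") << "\n";
}

int main() {
  FakeDnnContext ctx;   // in practice the dev_ctx is a shared/global object
  RunOpA(&ctx);
  RunOpB(ctx);          // prints 1: stale attribute leaks into the next op
  ctx.ClearDnnAttr();   // clearing between ops restores a clean slate
  RunOpB(ctx);          // prints 0
  return 0;
}
```
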
4 changes: 4 additions & 0 deletions paddle/phi/backends/gpu/gpu_context.cc
@@ -740,6 +740,8 @@ struct GPUContext::Impl {
dnn_attrs_[attr_name] = attr;
}

void ClearDnnAttr() { dnn_attrs_.clear(); }

// use one flag for all handles?
// they should be accessed consistently
bool owned_{false};
@@ -1042,4 +1044,6 @@ void GPUContext::SetDnnAttr(const std::string& attr_name, Attribute attr) {
return impl_->SetDnnAttr(attr_name, std::move(attr));
}

void GPUContext::ClearDnnAttr() { return impl_->ClearDnnAttr(); }

} // namespace phi
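
Both context classes use the same pimpl layout: the public ClearDnnAttr() simply forwards to a hidden Impl struct that owns the dnn_attrs_ map. A rough sketch of that forwarding, assuming a simplified int-valued attribute in place of phi::Attribute (names are illustrative, not Paddle's actual declarations):

```cpp
// Illustrative pimpl sketch: public API forwards to an Impl that owns the map.
#include <map>
#include <memory>
#include <string>

class Context {
 private:
  struct Impl {                                  // hidden state, as in *_context.cc
    std::map<std::string, int> dnn_attrs_;       // real code stores phi::Attribute
    void SetDnnAttr(const std::string& name, int attr) { dnn_attrs_[name] = attr; }
    void ClearDnnAttr() { dnn_attrs_.clear(); }
  };
  std::unique_ptr<Impl> impl_;

 public:
  Context() : impl_(std::make_unique<Impl>()) {}
  void SetDnnAttr(const std::string& name, int attr) { impl_->SetDnnAttr(name, attr); }
  void ClearDnnAttr() { impl_->ClearDnnAttr(); }   // public entry point added by this PR
};

int main() {
  Context ctx;
  ctx.SetDnnAttr("fuse_relu", 1);
  ctx.ClearDnnAttr();   // wipes all DNN attrs before the next kernel runs
  return 0;
}
```
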
1 change: 1 addition & 0 deletions paddle/phi/backends/gpu/gpu_context.h
@@ -172,6 +172,7 @@ class PADDLE_API GPUContext : public DeviceContext,
bool HasDnnAttr(const std::string& attr_name) const;
const Attribute& GetDnnAttr(const std::string& attr_name) const;
void SetDnnAttr(const std::string& attr_name, Attribute attr);
void ClearDnnAttr();

static const char* name() { return "GPUContext"; }

4 changes: 4 additions & 0 deletions paddle/phi/backends/onednn/onednn_context.cc
@@ -301,6 +301,8 @@ struct OneDNNContext::Impl {
dnn_attrs_[attr_name] = attr;
}

void ClearDnnAttr() { dnn_attrs_.clear(); }

bool HasDnnInput(const std::string& input_name) const {
return dnn_inputs_.count(input_name) != 0UL;
}
@@ -425,6 +427,8 @@ void OneDNNContext::SetDnnAttr(const std::string& attr_name, Attribute attr) {
return impl_->SetDnnAttr(attr_name, std::move(attr));
}

void OneDNNContext::ClearDnnAttr() { return impl_->ClearDnnAttr(); }

bool OneDNNContext::HasDnnInput(const std::string& input_name) const {
return impl_->HasDnnInput(input_name);
}
2 changes: 2 additions & 0 deletions paddle/phi/backends/onednn/onednn_context.h
@@ -146,6 +146,8 @@ class OneDNNContext : public CPUContext {
const DenseTensor* GetDnnInput(const std::string& input_name) const;
void SetDnnInput(const std::string& input_name, const DenseTensor* input);

void ClearDnnAttr();

void SetInputsName(const TensorNameMap& inputs_name);

void SetOutputsName(const TensorNameMap& outputs_name);