Fix/Fix memory leak in dygraph #17394
Merged
JiabinYang merged 11 commits into PaddlePaddle:develop from JiabinYang:feature/sorted_gradient_dygraph on May 17, 2019.
Changes from 7 commits

Commits (11):
ab89cbf  test=develop, add gradient sort backward strategy  (JiabinYang)
32ef54b  test=develop, fix test by add FLAGS_cudnn_deterministic on new tests  (JiabinYang)
9ddd929  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…  (JiabinYang)
c8697b4  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…  (JiabinYang)
49d21a5  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…  (JiabinYang)
bf7ecff  test=develop, fix memory leak in dygraph mode  (JiabinYang)
a1e5422  test=develop, fix memory leak in dygraph mode  (JiabinYang)
3eaead0  test=develop, polish code  (JiabinYang)
a877dd4  test=develop, polish code  (JiabinYang)
5b252ad  test=develop, merge from develop  (JiabinYang)
203bfda  test=develop, polish code  (JiabinYang)
@@ -112,13 +112,13 @@ void AddGradBySort(BackwardSumMap* bck_map, VarBase* target) {
                return a.first > b.first;
              });
   for (auto& var_pair : current.second) {
-    Variable* origin_grad = target->var_;
-    Variable* grad_to_add = var_pair.second->var_;
+    Variable* origin_grad = target->var_.get();
+    Variable* grad_to_add = var_pair.second->var_.get();
     VLOG(2) << "add origin_grad: " << target->Name();
     VLOG(2) << "added grad: " << var_pair.second->Name()
             << " trace id is: " << var_pair.first;
     AddTo(grad_to_add, origin_grad, current.first);
-    delete grad_to_add;
+    delete var_pair.second;
+    var_pair.second = nullptr;
   }
 }
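This first hunk is the heart of the leak fix: `VarBase::var_` is evidently now held by a `std::unique_ptr<framework::Variable>` (the header change is not shown in this view), callers borrow the raw pointer through `.get()`, and cleanup deletes the owning `VarBase` and nulls the stored pointer instead of deleting the borrowed `Variable*`. Below is a minimal, self-contained sketch of that ownership pattern; `Holder`, `Payload`, and `Accumulate` are hypothetical stand-ins, not Paddle's `VarBase`/`Variable`/`AddTo` API.

```cpp
// Minimal sketch (hypothetical types, not Paddle's API) of the ownership
// pattern above: the holder owns its payload through std::unique_ptr, callers
// borrow the raw pointer with .get(), and cleanup destroys the owning holder
// rather than the borrowed pointer, so the payload is released exactly once.
#include <memory>
#include <vector>

struct Payload { float data[4] = {0, 0, 0, 0}; };

struct Holder {
  std::unique_ptr<Payload> payload{new Payload()};  // sole owner of Payload
};

// Borrows raw pointers only; takes no ownership.
void Accumulate(Payload* src, Payload* dst) {
  for (int i = 0; i < 4; ++i) dst->data[i] += src->data[i];
}

int main() {
  Holder* target = new Holder();
  std::vector<Holder*> pending = {new Holder(), new Holder()};

  for (Holder*& h : pending) {
    Accumulate(h->payload.get(), target->payload.get());
    delete h;     // destroys the holder; its unique_ptr frees the payload
    h = nullptr;  // avoid leaving a dangling pointer in the container
  }
  delete target;
  return 0;
}
```

The same reasoning appears to drive the later change from `delete grad;` to `delete outputs[i];`: once the gradient `Variable` is owned by its `VarBase`, deleting the borrowed raw pointer would free the buffer twice, so the owning object is destroyed instead.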
@@ -132,8 +132,8 @@ class Autograd {
       return;
     }
     VLOG(3) << "start autograd";
-    bck_map = new BackwardSumMap();
-    grad_ref = new GradientRef();
+    bck_map = BackwardSumMap();
+    grad_ref = GradientRef();
     std::deque<OpBase*> ready;
     ready.push_back(var->PreOp());
@@ -144,7 +144,7 @@ class Autograd {
       OpBase* ready_op = ready.front();
       ready.pop_front();
       std::map<std::string, std::vector<VarBase*>> input_grads =
-          ready_op->ApplyGrad(bck_map, grad_ref, bck_stratedy);
+          ready_op->ApplyGrad(&bck_map, &grad_ref, bck_stratedy);

       for (auto it = input_grads.rbegin(); it != input_grads.rend(); ++it) {
         const std::vector<VarBase*>& ingrads = it->second;
@@ -185,12 +185,12 @@ class Autograd {
       for (const auto& map : candidate->grad_output_vars_) {
         for (const auto& it : map) {
           for (const auto& vb : it.second) {
-            if (grad_ref->find(vb) == grad_ref->end()) {
-              grad_ref->insert(std::make_pair(vb, 1));
+            if (grad_ref.find(vb) == grad_ref.end()) {
+              grad_ref.insert(std::make_pair(vb, 1));
             } else {
               // add ref count by 1 when we find grad_var can be generated by
               // one grad_op
-              grad_ref->at(vb) += 1;
+              grad_ref.at(vb) += 1;
             }
           }
         }

Review comment on the `grad_ref.insert(std::make_pair(vb, 1));` line: C++ guarantees zero-initialization of primitive types. See here. You can just write … (the suggestion is cut off in this view; see the sketch after this hunk).
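The inline comment above is truncated, but presumably the point is that `std::map::operator[]` value-initializes a missing mapped value, so an `int` counter starts at 0 and the whole find/insert/else bookkeeping can collapse into one increment. A small sketch of that idea, using a stand-in map rather than Paddle's actual `GradientRef` type:

```cpp
// Sketch of the reviewer's point (not the committed code): std::map's
// operator[] value-initializes a missing mapped value, so an int starts at 0
// and the find/insert/else branch collapses into a single increment.
#include <cassert>
#include <map>

int main() {
  std::map<const void*, int> grad_ref;  // stand-in for GradientRef
  int dummy_a = 0, dummy_b = 0;         // stand-ins for gradient variables

  // Equivalent to: if not found, insert with count 1; otherwise += 1.
  ++grad_ref[&dummy_a];
  ++grad_ref[&dummy_a];
  ++grad_ref[&dummy_b];

  assert(grad_ref[&dummy_a] == 2);
  assert(grad_ref[&dummy_b] == 1);
  return 0;
}
```

If `GradientRef` is indeed a `std::map` keyed by variable with an integer count, `++grad_ref[vb];` would cover both the first-seen and the already-seen cases.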
@@ -213,8 +213,8 @@ class Autograd {
     return ret;
   }

-  BackwardSumMap* bck_map;
-  GradientRef* grad_ref;
+  BackwardSumMap bck_map;
+  GradientRef grad_ref;
 };

 std::unique_ptr<VarBase> VarBase::NewVarBase(const platform::Place& dst_place,

Review comment on the `bck_map` / `grad_ref` members: No need to be data members.
@@ -230,14 +230,16 @@ std::unique_ptr<VarBase> VarBase::NewVarBase(const platform::Place& dst_place,
       new_var->var_->GetMutable<framework::LoDTensor>();
   tensor->set_lod(var_->Get<framework::LoDTensor>().lod());

-  framework::TensorCopy(var_->Get<framework::LoDTensor>(), dst_place, tensor);
   if (blocking) {
-    platform::DeviceContext* dev_ctx =
+    platform::DeviceContext* dst_dev_ctx =
         platform::DeviceContextPool::Instance().Get(dst_place);

     framework::TensorCopySync(var_->Get<framework::LoDTensor>(), dst_place,
                               tensor);

-    dev_ctx->Wait();
+    platform::DeviceContext* src_dev_ctx =
+        platform::DeviceContextPool::Instance().Get(
+            var_->Get<framework::LoDTensor>().place());
+    dst_dev_ctx->Wait();
+    src_dev_ctx->Wait();
   } else {
     framework::TensorCopy(var_->Get<framework::LoDTensor>(), dst_place, tensor);
   }
@@ -326,7 +328,7 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad(
       PADDLE_ENFORCE_NOT_NULL(grad_inp->var_, "op %s input %s nullptr",
                               grad_op_desc->Type(), grad_inp->Name());

-      grad_invars.emplace_back(grad_inp->var_);
+      grad_invars.emplace_back(grad_inp->var_.get());
     }
   }
@@ -337,7 +339,7 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad(
       PADDLE_ENFORCE_NOT_NULL(grad_out->var_, "op %s output %s nullptr",
                               grad_op_desc->Type(), grad_out->Name());

-      grad_outvars.emplace_back(grad_out->var_);
+      grad_outvars.emplace_back(grad_out->var_.get());
     }
   }
@@ -396,13 +398,13 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad(
         grad_ref->at(origin_outputs[i])--;
       }
     } else {
-      framework::Variable* grad = outputs[i]->var_;
-      framework::Variable* orig_grad = origin_outputs[i]->var_;
+      framework::Variable* grad = outputs[i]->var_.get();
+      framework::Variable* orig_grad = origin_outputs[i]->var_.get();
       VLOG(2) << "AddTo Called with orig_grad is: "
               << origin_outputs[i]->name_ << " Grad to be added is "
               << outputs[i]->name_;
       AddTo(grad, orig_grad, place_);
-      delete grad;
+      delete outputs[i];
     }
   }
 }
@@ -453,7 +455,7 @@ void PyLayer::RegisterFunc(int func_id, const py::object& py_func) {

 int PyLayer::NumFuncs() { return py_funcs_.size(); }

-std::vector<framework::Variable*> PyLayer::Apply(
+std::vector<std::unique_ptr<framework::Variable>> PyLayer::Apply(
     int func_id, const std::vector<VarBase*>& inputs) {
   PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
   return CallPythonFunc(py_funcs_[func_id], inputs);
@@ -470,13 +472,13 @@ std::vector<VarBase*> PyLayer::ApplyGrad(int func_id,
     outs.emplace_back(new VarBase(
         string::Sprintf("%s_out_%d", framework::GradVarName(PyLayer::kFwdOut),
                         i),
-        rets[i], nullptr, true));
+        std::move(rets[i]), nullptr, true));
   }

   return outs;
 }

-std::vector<framework::Variable*> PyLayer::CallPythonFunc(
+std::vector<std::unique_ptr<framework::Variable>> PyLayer::CallPythonFunc(
     const py::object& callable, const std::vector<VarBase*>& ins) {
   py::gil_scoped_acquire guard;
   py::tuple in_args(ins.size());
@@ -490,19 +492,20 @@ std::vector<framework::Variable*> PyLayer::CallPythonFunc(
   auto ret = callable(in_args);
   auto ret_tuple = py::cast<py::tuple>(ret);
   size_t ret_num = py::len(ret_tuple);
-  std::vector<framework::Variable*> outs;
+  std::vector<std::unique_ptr<framework::Variable>> outs;
   outs.reserve(ret_num);
   VLOG(3) << "pyfunc out " << ret_num;
   for (size_t i = 0; i < ret_num; ++i) {
     try {
       auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
       PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
                               "Output tensor %d should not be nullptr", i);
-      auto* var = new framework::Variable();
+      auto var =
+          std::unique_ptr<framework::Variable>(new framework::Variable());
       auto* tensor = var->GetMutable<framework::LoDTensor>();
       tensor->ShareDataWith(*py_out_tensor);
       tensor->set_lod(py_out_tensor->lod());
-      outs.emplace_back(var);
+      outs.emplace_back(std::move(var));
     } catch (py::cast_error&) {
       PADDLE_THROW("The %d-th output must be LoDTensor", i);
     }
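The `PyLayer` hunks above change `Apply` and `CallPythonFunc` to return `std::vector<std::unique_ptr<framework::Variable>>`, making ownership of each Python-produced output explicit: the vector owns the variables until the caller moves them out, for example into a new `VarBase` via `std::move(rets[i])`. A compact sketch of that return-by-`unique_ptr` factory pattern; `Output` and `MakeOutputs` are hypothetical stand-ins, not Paddle's API.

```cpp
// Minimal sketch of returning owned objects through a vector of unique_ptr;
// Output/MakeOutputs are hypothetical stand-ins, not framework::Variable or
// the PyLayer API.
#include <memory>
#include <utility>
#include <vector>

struct Output {
  int value = 0;
};

// The factory owns nothing after returning: ownership of each element is
// moved out to the caller through the vector of unique_ptr.
std::vector<std::unique_ptr<Output>> MakeOutputs(int n) {
  std::vector<std::unique_ptr<Output>> outs;
  outs.reserve(n);
  for (int i = 0; i < n; ++i) {
    auto out = std::unique_ptr<Output>(new Output());  // C++11-compatible
    out->value = i;
    outs.emplace_back(std::move(out));  // transfer ownership into the vector
  }
  return outs;
}

int main() {
  auto outs = MakeOutputs(3);
  // Consumers either keep the unique_ptr (taking ownership) or borrow the
  // raw pointer with .get() and never delete it.
  Output* borrowed = outs[1].get();
  return borrowed->value == 1 ? 0 : 1;
}
```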
Review comment (apparently on the `bck_map = BackwardSumMap();` and `grad_ref = GradientRef();` lines above): These two lines are unnecessary. BTW, it seems that it is not necessary that `bck_map` and `grad_ref` are data members of `Autograd`. Just make them temporary variables inside the function.
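A rough sketch of what that suggestion could look like, assuming the two maps are only needed for the duration of a single backward pass; every name below is a simplified stand-in, not the actual Paddle type or signature.

```cpp
// Rough sketch of the reviewer's suggestion: keep the bookkeeping maps as
// locals of the backward routine and hand callees a pointer, instead of
// storing them as (heap-allocated) data members. All names are simplified
// stand-ins, not the actual Paddle types or signatures.
#include <map>
#include <string>

using BackwardSumMap = std::map<std::string, int>;  // stand-in definition
using GradientRef = std::map<std::string, int>;     // stand-in definition

// Callee only reads and updates the maps; it never owns them.
void ApplyGradStep(BackwardSumMap* bck_map, GradientRef* grad_ref,
                   const std::string& var) {
  (*bck_map)[var] += 1;
  (*grad_ref)[var] += 1;
}

void RunBackward() {
  // Local to this call: constructed fresh, destroyed automatically on return.
  BackwardSumMap bck_map;
  GradientRef grad_ref;

  ApplyGradStep(&bck_map, &grad_ref, "x@GRAD");
  ApplyGradStep(&bck_map, &grad_ref, "y@GRAD");
}

int main() {
  RunBackward();
  return 0;
}
```

Constructed as locals, the maps are destroyed automatically when the routine returns, so there is no `new`, no `delete`, and no way to leak them.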