
Commit 099a45a

Revert "refine optimizer create accumulators (PaddlePaddle#50188)"
This reverts commit 244e754.
1 parent: b357312 · commit: 099a45a

File tree: 10 files changed (+0 / -33 lines)
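
For context, the change being reverted (PaddlePaddle#50188) guarded each optimizer's `_create_accumulators` with a per-instance set named `_already_create_accumulater`, so accumulators were only created once per parameter name; this commit removes that guard and restores unconditional creation. Below is a minimal standalone sketch of the two behaviors; `_SketchOptimizer`, its accumulator names, and the `dedup` flag are illustrative placeholders, not Paddle's real classes or API.

# Minimal standalone sketch of the guard pattern removed by this revert.
class _SketchOptimizer:
    def __init__(self, dedup=False):
        self._accumulators = {}        # param name -> {accumulator name: value}
        self._dedup = dedup
        self._already_created = set()  # the set introduced by PR #50188

    def _add_accumulator(self, acc_name, param_name):
        # In Paddle this registers a persistable tensor; here we just record it.
        self._accumulators.setdefault(param_name, {})[acc_name] = 0.0

    def _create_accumulators(self, param_names):
        for name in param_names:
            if self._dedup and name in self._already_created:
                continue  # skip already-seen params (behavior removed by this revert)
            self._add_accumulator("moment1", name)
            self._add_accumulator("moment2", name)
            if self._dedup:
                self._already_created.add(name)

# With the guard, repeated calls skip parameters that were already handled;
# after the revert, every call walks the full parameter list again.
opt = _SketchOptimizer(dedup=True)
opt._create_accumulators(["w", "b"])
opt._create_accumulators(["w", "b"])  # no-op for "w" and "b" under the guard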

python/paddle/optimizer/adadelta.py (-3)

@@ -145,11 +145,8 @@ def _create_accumulators(self, block, parameters):
             parameters = parameters.get('params')
 
         for p in parameters:
-            if p.name in self._already_create_accumulater:
-                continue
             self._add_accumulator(self._avg_squared_grad_acc_str, p)
             self._add_accumulator(self._avg_squared_update_acc_str, p)
-            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         if isinstance(param_and_grad, dict):
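
The two accumulators created here are Adadelta's running averages; judging from the identifiers, `_avg_squared_grad_acc_str` corresponds to E[g^2] and `_avg_squared_update_acc_str` to E[dx^2] in Zeiler's Adadelta rule (that mapping is an assumption based on the names). A plain-Python sketch of one Adadelta step using both accumulators, not Paddle code:

# Hedged sketch of one Adadelta step with scalar values.
def adadelta_step(param, grad, avg_sq_grad, avg_sq_update, rho=0.95, eps=1e-6):
    avg_sq_grad = rho * avg_sq_grad + (1 - rho) * grad * grad
    delta = -((avg_sq_update + eps) ** 0.5 / (avg_sq_grad + eps) ** 0.5) * grad
    avg_sq_update = rho * avg_sq_update + (1 - rho) * delta * delta
    return param + delta, avg_sq_grad, avg_sq_update

p, e_g2, e_dx2 = 1.0, 0.0, 0.0  # one parameter, both accumulators start at zero
p, e_g2, e_dx2 = adadelta_step(p, grad=0.5, avg_sq_grad=e_g2, avg_sq_update=e_dx2)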

python/paddle/optimizer/adagrad.py (-3)

@@ -139,14 +139,11 @@ def _create_accumulators(self, block, parameters):
             parameters = self._update_param_group(parameters)
 
         for p in parameters:
-            if p.name in self._already_create_accumulater:
-                continue
             self._add_accumulator(
                 self._moment_acc_str,
                 p,
                 fill_value=self.initial_accumulator_value,
             )
-            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         assert isinstance(block, framework.Block)

python/paddle/optimizer/adam.py (-4)

@@ -317,12 +317,9 @@ def _create_accumulators(self, block, parameters):
 
         # Create accumulator tensors for first and second moments
         for p in parameters:
-            if p.name in self._already_create_accumulater:
-                continue
             if self._multi_precision and self._is_dtype_fp16_or_bf16(p.dtype):
                 master_p = self._create_master_weight(p)
                 self._add_moments_pows(master_p)
-                self._already_create_accumulater.add(p.name)
                 continue
             if (
                 self._is_dtype_fp16_or_bf16(p.dtype)
@@ -333,7 +330,6 @@ def _create_accumulators(self, block, parameters):
                     "Consider using multi_precision=True option of the Adam optimizer."
                 )
             self._add_moments_pows(p)
-            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         assert isinstance(block, framework.Block)
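
The first branch in this hunk is the mixed-precision path: when `multi_precision` is enabled and the parameter is fp16/bf16, a float32 master weight is created and the moment accumulators are attached to that master copy rather than to the low-precision parameter. A rough standalone sketch of that branching; `Param`, `create_master_weight`, and `add_moments` are illustrative stand-ins, not Paddle internals:

# Sketch of the multi-precision branch inside _create_accumulators.
import warnings
from dataclasses import dataclass

@dataclass
class Param:
    name: str
    dtype: str  # "float16", "bfloat16", or "float32"

def create_master_weight(p: Param) -> Param:
    # Keep a float32 shadow copy of a low-precision parameter.
    return Param(name=p.name + ".master", dtype="float32")

def create_accumulators(params, multi_precision, add_moments):
    for p in params:
        low_precision = p.dtype in ("float16", "bfloat16")
        if multi_precision and low_precision:
            master = create_master_weight(p)
            add_moments(master)   # moments live on the fp32 master weight
            continue
        if low_precision:
            warnings.warn(
                "Accumulating moments in fp16/bf16 can hurt accuracy; "
                "consider multi_precision=True."
            )
        add_moments(p)            # default path: moments on the param itself

# Example: an fp16 parameter with multi_precision gets moments on "w.master".
create_accumulators([Param("w", "float16")], True, lambda q: print("moments on", q.name))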

python/paddle/optimizer/adamax.py (-3)

@@ -176,8 +176,6 @@ def _create_accumulators(self, block, parameters):
 
         # Create accumulator tensors for first moment and infinity norm
         for p in parameters:
-            if p.name in self._already_create_accumulater:
-                continue
             self._add_accumulator(self._moment_acc_str, p)
             self._add_accumulator(self._inf_norm_acc_str, p)
             self._add_accumulator(
@@ -186,7 +184,6 @@ def _create_accumulators(self, block, parameters):
                 fill_value=self._beta1,
                 shape=[1],
             )
-            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         assert isinstance(block, framework.Block)

python/paddle/optimizer/adamw.py (-5)

@@ -281,7 +281,6 @@ def __init__(
         self._use_multi_tensor = None
         self.regularization = None
         self._auxiliary_vars = {}
-        self._already_create_accumulater = set()
 
     def _set_auxiliary_var(self, key, val):
         self._auxiliary_vars[key] = val
@@ -423,12 +422,9 @@ def _create_accumulators(self, block, parameters):
 
         # Create accumulator tensors for first and second moments
         for p in parameters:
-            if p.name in self._already_create_accumulater:
-                continue
             if self._multi_precision and self._is_dtype_fp16_or_bf16(p.dtype):
                 master_p = self._create_master_weight(p)
                 self._add_moments_pows(master_p)
-                self._already_create_accumulater.add(p.name)
                 continue
             if (
                 self._is_dtype_fp16_or_bf16(p.dtype)
@@ -439,7 +435,6 @@ def _create_accumulators(self, block, parameters):
                     "Consider using multi_precision=True option of the Adam optimizer."
                 )
             self._add_moments_pows(p)
-            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         assert isinstance(block, framework.Block)

python/paddle/optimizer/lamb.py (-4)

@@ -190,15 +190,11 @@ def _create_accumulators(self, block, parameters):
 
         # Create accumulator tensors for first and second moments
         for p in parameters:
-            if p.name in self._already_create_accumulater:
-                continue
             if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
                 master_p = self._create_master_weight(p)
                 self._add_moments_pows(master_p)
-                self._already_create_accumulater.add(p.name)
             else:
                 self._add_moments_pows(p)
-                self._already_create_accumulater.add(p.name)
 
     def _get_accumulator(self, name, param):
         """Utility function to fetch an accumulator for a parameter

python/paddle/optimizer/momentum.py (-4)

@@ -270,12 +270,9 @@ def _create_accumulators(self, block, parameters):
             parameters = self._update_param_group(parameters)
 
         for p in parameters:
-            if p.name in self._already_create_accumulater:
-                continue
             if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
                 master_p = self._create_master_weight(p)
                 self._add_accumulator(self._velocity_acc_str, master_p)
-                self._already_create_accumulater.add(p.name)
                 continue
             if (
                 p.dtype == core.VarDesc.VarType.FP16
@@ -286,7 +283,6 @@ def _create_accumulators(self, block, parameters):
                     "Consider using multi_precision=True option of the Momentum optimizer."
                 )
             self._add_accumulator(self._velocity_acc_str, p)
-            self._already_create_accumulater.add(p.name)
 
     def _create_regularization_of_grad(self, param, grad, regularization=None):
         """Create and add backward regularization Operators

python/paddle/optimizer/optimizer.py (-1)

@@ -275,7 +275,6 @@ def __init__(
 
         self._param_dict = self._create_multi_tensor_dict()
         self._auxiliary_vars = {}
-        self._already_create_accumulater = set()
 
     def _set_auxiliary_var(self, key, val):
         self._auxiliary_vars[key] = val
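
This is the base-class half of the revert: `Optimizer.__init__` no longer creates the `_already_create_accumulater` set, which only works because the per-optimizer hunks above remove every read of that attribute in the same commit. A tiny illustration (not Paddle code) of why the two halves have to land together:

class Base:
    def __init__(self):
        pass  # no longer defines self._already_create_accumulater

class Child(Base):
    def uses_guard(self):
        # Any leftover reference to the removed attribute would fail at runtime.
        return "w" in self._already_create_accumulater

try:
    Child().uses_guard()
except AttributeError as exc:
    print("subclass still referenced the removed attribute:", exc)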

python/paddle/optimizer/rmsprop.py (-3)

@@ -199,12 +199,9 @@ def _create_accumulators(self, block, parameters):
             parameters = parameters.get('params')
 
         for p in parameters:
-            if p.name in self._already_create_accumulater:
-                continue
             self._add_accumulator(self._momentum_acc_str, p)
             self._add_accumulator(self._mean_square_acc_str, p)
             self._add_accumulator(self._mean_grad_acc_str, p)
-            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         if not isinstance(block, framework.Block):
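
RMSProp creates three accumulators here (momentum, mean square, mean gradient); the mean-gradient one is presumably used by the centered variant, which subtracts the squared running mean of the gradient from the running mean of the squared gradient. A plain-Python sketch of one centered-RMSProp step under that assumption, not Paddle's implementation:

# Hedged sketch of one centered RMSProp step with scalar values.
def rmsprop_step(param, grad, mom, mean_sq, mean_g,
                 lr=0.01, rho=0.95, momentum=0.9, eps=1e-6, centered=True):
    mean_sq = rho * mean_sq + (1 - rho) * grad * grad
    if centered:
        mean_g = rho * mean_g + (1 - rho) * grad
        denom = mean_sq - mean_g * mean_g   # centered second-moment estimate
    else:
        denom = mean_sq
    mom = momentum * mom + lr * grad / (denom + eps) ** 0.5
    return param - mom, mom, mean_sq, mean_g

p, mom, ms, mg = 1.0, 0.0, 0.0, 0.0  # one parameter, three accumulators
p, mom, ms, mg = rmsprop_step(p, grad=0.5, mom=mom, mean_sq=ms, mean_g=mg)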

python/paddle/optimizer/sgd.py (-3)

@@ -129,11 +129,8 @@ def _create_accumulators(self, block, parameters):
 
         # Create accumulator tensors for first and second moments
         for p in parameters:
-            if p.name in self._already_create_accumulater:
-                continue
             if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
                 master_p = self._create_master_weight(p)
-                self._already_create_accumulater.add(p.name)
                 continue
             if (
                 p.dtype == core.VarDesc.VarType.FP16
