From f3841bc784a2b14a8d55c43848c10875eec40f84 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Sun, 11 May 2025 10:07:14 +0800
Subject: [PATCH] Fix typos in GradScaler docstrings

---
 python/paddle/amp/grad_scaler.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/paddle/amp/grad_scaler.py b/python/paddle/amp/grad_scaler.py
index c371918e3f0e4..6428d7705f464 100644
--- a/python/paddle/amp/grad_scaler.py
+++ b/python/paddle/amp/grad_scaler.py
@@ -958,7 +958,7 @@ def is_use_dynamic_loss_scaling(self) -> bool:
         Whether to use dynamic loss scaling.
 
         Returns:
-            bool: if fixed loss_scaling is used return False, if the loss scaling is updated dynamically return true.
+            bool: if fixed loss_scaling is used, return False; if the loss scaling is updated dynamically, return True.
 
         Examples:
             .. code-block:: python
@@ -1213,7 +1213,7 @@ def get_decr_every_n_nan_or_inf(self) -> int:
         Return the num `n`, `n` represent decreases loss scaling every `n` accumulated steps with nan or inf gradients.
 
         Returns:
-            int: the num `n`, `n` represent decreases loss scaling every `n` accumulated steps with nan or inf gradients.
+            int: the num `n`; the loss scaling is decreased every `n` accumulated steps with nan or inf gradients.
 
         Examples:
             .. code-block:: python
@@ -1308,7 +1308,7 @@ def load_state_dict(self, state_dict: _ScaleStateDict) -> None:
         Loads the scaler state.
 
         Args:
-            state_dict(dict): scaler state. Should be an object returned from a call to `GradScaler.state_dict()`.
+            state_dict(dict): scaler state. Should be an object returned from a call to ``GradScaler.state_dict()``.
 
         Examples:
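
--
For context, a minimal sketch of how the three documented APIs fit together
in ordinary dygraph AMP training. This is not part of the patch: it assumes a
CUDA-enabled build of Paddle (GradScaler disables itself on CPU), and the toy
model, optimizer, and hyperparameter values are illustrative only.

    import paddle

    # Toy model and optimizer (illustrative, not from the patch).
    model = paddle.nn.Linear(4, 4)
    optimizer = paddle.optimizer.SGD(
        learning_rate=0.01, parameters=model.parameters()
    )
    scaler = paddle.amp.GradScaler(
        init_loss_scaling=1024,
        use_dynamic_loss_scaling=True,
        decr_every_n_nan_or_inf=2,
    )

    print(scaler.is_use_dynamic_loss_scaling())  # True: scaling updates dynamically
    print(scaler.get_decr_every_n_nan_or_inf())  # 2: decrease after 2 bad steps

    data = paddle.rand([2, 4])
    with paddle.amp.auto_cast():
        loss = model(data).mean()

    scaled = scaler.scale(loss)  # multiply the loss by the current loss scaling
    scaled.backward()
    scaler.step(optimizer)       # unscale gradients, then run the optimizer step
    scaler.update()              # adjust the loss scaling from nan/inf history
    optimizer.clear_grad()

    # load_state_dict() expects an object returned from GradScaler.state_dict().
    state = scaler.state_dict()
    scaler.load_state_dict(state)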