
Commit 073b482
Fix
1 parent af91d96 commit 073b482

10 files changed: 21 additions & 21 deletions


python/paddle/incubate/autograd/functional.py

Lines changed: 9 additions & 9 deletions
@@ -31,7 +31,7 @@ def vjp(func, xs, v=None):
             returns a sequence of Tensors or a Tensor.
         xs(Tensor|Sequence[Tensor]): Used as positional arguments to evaluate
             ``func``. ``xs`` is accepted as one Tensor or a sequence of Tensors.
-        v(Tensor|Sequence[Tensor]|None, optional): The cotangent vector invovled
+        v(Tensor|Sequence[Tensor]|None, optional): The cotangent vector involved
             in the VJP computation. ``v`` matches the size and shape of
             ``func`` 's output. Defaults to None, which is equivalent to all
             ones the same size of ``func`` 's output.
@@ -67,8 +67,8 @@ def vjp(func, xs, v=None):
     """
     _check_inputs(func, xs, v)

-    # ``_seprate`` breaks the dependencies between ``xs`` and other
-    # variables. See more ``_seprate`` .
+    # ``_separate`` breaks the dependencies between ``xs`` and other
+    # variables. See more ``_separate`` .
     if framework.in_dygraph_mode() or not utils.prim_enabled():
         xs, v = _separate(xs), _separate(v)
     ys = func(*xs) if isinstance(xs, typing.Sequence) else func(xs)
@@ -91,7 +91,7 @@ def jvp(func, xs, v=None):
         xs(Tensor|Sequence[Tensor]): Used as positional arguments to
             evaluate ``func``. The ``xs`` is accepted as one Tensor or a
             Sequence of Tensors.
-        v(Tensor|Sequence[Tensor]|None, Optional): The tangent vector invovled
+        v(Tensor|Sequence[Tensor]|None, Optional): The tangent vector involved
             in the JVP computation. The ``v`` matches the size and shape of
             ``xs`` . Default value is None and in this case is equivalent to
             all ones the same size of ``xs`` .
@@ -127,8 +127,8 @@ def jvp(func, xs, v=None):

     """
     _check_inputs(func, xs, v)
-    # ``_seprate`` breaks the dependencies between ``xs`` and other
-    # variables. See more ``_seprate`` .
+    # ``_separate`` breaks the dependencies between ``xs`` and other
+    # variables. See more ``_separate`` .
     if framework.in_dygraph_mode() or not utils.prim_enabled():
         xs, v = _separate(xs), _separate(v)
     ys = func(*xs) if isinstance(xs, typing.Sequence) else func(xs)
@@ -153,7 +153,7 @@ def _double_backward_trick(ys, xs, v):

 def _zeros_like_with_grad(xs):
     """Create a zero or zeros sequence Tensor like ``xs`` with a flag
-    ``stop_graident=False`` .
+    ``stop_gradient=False`` .
     """
     if not isinstance(xs, typing.Sequence):
         ys = paddle.zeros_like(xs)
@@ -309,7 +309,7 @@ def _jac_func(*xs):
                 not is_batched and jac.shape[0] != 1
             ):
                 raise RuntimeError(
-                    "The function given to Hessian shoud return as single element Tensor or batched single element Tensor."
+                    "The function given to Hessian should return as single element Tensor or batched single element Tensor."
                 )
             return jac[:, 0, :] if is_batched else jac[0, :]

@@ -485,7 +485,7 @@ def _multi_index(indexes, shape):

     Currently supporting following input format:
     * ([positive|negative|slice], ...), the right-most elements can be
-        omited.
+        omitted.

     The standard format after converted is slice tuple which contains N elements:
     * ([positive|slice], ..., [positive|slice])
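
For context, the two public APIs whose docstrings are fixed above share one calling pattern. A minimal dygraph usage sketch, based only on the signatures and the ``v`` defaults documented in this diff (the squaring ``func`` is a placeholder):

    import paddle
    from paddle.incubate.autograd import jvp, vjp

    def func(x):
        return paddle.matmul(x, x)

    x = paddle.ones(shape=[2, 2], dtype='float32')
    x.stop_gradient = False

    # v=None: the cotangent defaults to all ones shaped like func's output
    ys, xs_grad = vjp(func, x)

    # v=None: the tangent defaults to all ones shaped like xs
    ys, ys_dot = jvp(func, x)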

python/paddle/incubate/distributed/fleet/parameter_server/ir/pserver_pass.py

Lines changed: 1 addition & 1 deletion
@@ -896,7 +896,7 @@ def add_large_scale_op(
     entry_attr = get_entry_attr(param)

     if fuse:
-        # remove origin optimzier op
+        # remove origin optimizer op
         opt_block._remove_op(opt_idx)

     # training/infer

python/paddle/incubate/distributed/utils/io/save_for_auto.py

Lines changed: 1 addition & 1 deletion
@@ -121,7 +121,7 @@ def _save_param_attr(state_dict_, path, dims_mapping_dict=None):
     save params' attr dict
     Args:
         state_dict_:
-            state for which to save attrs, when the state is optimzier state, the master and LRScheduler will be reomoved.
+            state for which to save attrs, when the state is optimizer state, the master and LRScheduler will be removed.
         path:
             path to save
         dims_mapping_dict:

python/paddle/incubate/nn/functional/fused_gate_attention.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ def fused_gate_attention(
     use_flash_attn=False,
 ):
     r"""
-    Attention mapps queries and a set of key-value pairs to outputs, and
+    Attention maps queries and a set of key-value pairs to outputs, and
     Gate Attention performs multiple parallel attention to jointly attending
     to information from different representation subspaces. This API only
     support self_attention. The pseudo code is as follows:
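
The pseudo code the docstring refers to is truncated out of this hunk. For orientation only, a plain-paddle sketch of single-head gated self-attention in the spirit described (no mask, no multi-head split; every weight name here is hypothetical, and this is not the fused kernel's exact computation):

    import paddle
    import paddle.nn.functional as F

    def gated_self_attention(x, wq, wk, wv, wg, wo):
        # x: [batch, seq, dim]; wq/wk/wv/wg/wo: hypothetical [dim, dim] weights
        q, k, v = x @ wq, x @ wk, x @ wv
        scale = q.shape[-1] ** -0.5
        # scaled dot-product attention over the sequence axis
        attn = F.softmax(paddle.matmul(q, k, transpose_y=True) * scale, axis=-1)
        out = paddle.matmul(attn, v)
        # the "gate": a sigmoid projection of the input modulates the output
        return (F.sigmoid(x @ wg) * out) @ wo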

python/paddle/incubate/operators/unzip.py

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ def unzip(input, lod, len):

     **unzip layers**

-    unzip 'input' accroding to 'lod'
+    unzip 'input' according to 'lod'

     Args:
         input (Variable): The zipped input

python/paddle/incubate/optimizer/functional/line_search.py

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ def cubic_interpolation_(x1, f1, g1, x2, f2, g2):
         x1, f1, g1: point1's position, value and gradient.
         x2, f2, g2: point2's position, value and gradient.
     Returns:
-        min_pos: the minimun point between the specified points in the cubic curve.
+        min_pos: the minimum point between the specified points in the cubic curve.
     """
     xmin, xmax = paddle.static.nn.cond(
         x1 <= x2, lambda: (x1, x2), lambda: (x2, x1)
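
The same `minimun` fix recurs in the dygraph `_cubic_interpolate` below; both routines implement the standard cubic-interpolation step from the line-search literature. A self-contained sketch of that formula with plain Python floats (my own helper, not either Paddle implementation):

    import math

    def cubic_minimizer(x1, f1, g1, x2, f2, g2):
        """Minimizer of the cubic fit through (x1, f1) and (x2, f2)
        with slopes g1 and g2, clamped into [x1, x2]."""
        # normalize so that x1 < x2 (the formula below assumes it)
        if x1 > x2:
            x1, f1, g1, x2, f2, g2 = x2, f2, g2, x1, f1, g1
        d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2)
        d2_square = d1 * d1 - g1 * g2
        if d2_square < 0:  # no real root: fall back to bisection
            return (x1 + x2) / 2
        d2 = math.sqrt(d2_square)
        min_pos = x2 - (x2 - x1) * (g2 + d2 - d1) / (g2 - g1 + 2 * d2)
        return min(max(min_pos, x1), x2)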

python/paddle/incubate/optimizer/line_search_dygraph.py

Lines changed: 2 additions & 2 deletions
@@ -17,7 +17,7 @@


 def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
     r"""Cubic interpolation between (x1, f1, g1) and (x2, f2, g2).
-    Use two points and their gradient to determine a cubic function and get the minimun point
+    Use two points and their gradient to determine a cubic function and get the minimum point
     between them in the cubic curve.

     Reference:
@@ -30,7 +30,7 @@ def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
         bounds: bounds of interpolation area

     Returns:
-        min_pos: the minimun point between the specified points in the cubic curve.
+        min_pos: the minimum point between the specified points in the cubic curve.
     """
     # Compute bounds of interpolation area
     if bounds is not None:

python/paddle/incubate/optimizer/lookahead.py

Lines changed: 2 additions & 2 deletions
@@ -42,8 +42,8 @@ class LookAhead(Optimizer):

     Args:
         inner_optimizer (Optimizer): The optimizer that update fast params step by step.
-        alpha (float, optinal): The learning rate of Lookahead. The default value is 0.5.
-        k (int, optinal): The slow params is updated every k steps. The default value is 5.
+        alpha (float, optional): The learning rate of Lookahead. The default value is 0.5.
+        k (int, optional): The slow params is updated every k steps. The default value is 5.
         name (str, optional): Normally there is no need for user to set this property.
             For more information, please refer to :ref:`api_guide_Name`.
             The default value is None.
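
A minimal dygraph sketch of the two corrected knobs, assuming the usual optimizer workflow (the toy layer and data are placeholders):

    import paddle

    # a toy layer and an inner (fast) optimizer
    layer = paddle.nn.Linear(10, 1)
    sgd = paddle.optimizer.SGD(learning_rate=0.1, parameters=layer.parameters())

    # slow params sync toward fast params every k=5 steps,
    # moving a fraction alpha=0.5 of the way
    lookahead = paddle.incubate.LookAhead(sgd, alpha=0.5, k=5)

    x = paddle.rand([4, 10])
    loss = layer(x).mean()
    loss.backward()
    lookahead.step()
    lookahead.clear_grad()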

python/paddle/incubate/tensor/manipulation.py

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@
 def _npu_identity(x, format=-1):
     """

-    This OP takes in the Tensor :attr:`x` and change it to ouptut with
+    This OP takes in the Tensor :attr:`x` and change it to output with
     aclFormat with int value. This API is only used for Ascend NPU.

     Args:

python/paddle/io/dataloader/dataloader_iter.py

Lines changed: 2 additions & 2 deletions
@@ -49,11 +49,11 @@
 # NOTE: fix `terminate called without an active exception`
 # if for loop break and program exit immediately(with no model
 # layers processing) after iterate **the first few data** in
-# distributed lauch mode, distributed launch will call
+# distributed launch mode, distributed launch will call
 # terminate() to kill main process on each devices, but thread
 # is still iterating to fullfill blocking queue caches, which
 # may cause thread error `terminate called without an active
-# exception` for terminate is a strong singal and `__del__`
+# exception` for terminate is a strong signal and `__del__`
 # of DataLoader may not be called, so we add a global link to
 # the last DataLoader instance to call `__del__` to clean up
 # resources
