@@ -196,12 +196,12 @@ def avg_pool1d(x,
196
196
.. code-block:: python
197
197
198
198
import paddle
199
- import paddle.nn.functional as F
200
- import numpy as np
199
+ import paddle.nn as nn
201
200
202
- data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
203
- out = F.avg_pool1d(data, kernel_size=2, stride=2, padding=0)
204
- # out shape: [1, 3, 16]
201
+ data = paddle.uniform([1, 3, 32], paddle.float32)
202
+ AvgPool1D = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
203
+ pool_out = AvgPool1D(data)
204
+ # pool_out shape: [1, 3, 16]
205
205
"""
206
206
"""NCL to NCHW"""
207
207
data_format = "NCHW"
@@ -316,10 +316,9 @@ def avg_pool2d(x,
316
316
317
317
import paddle
318
318
import paddle.nn.functional as F
319
- import numpy as np
320
319
321
320
# avg pool2d
322
- x = paddle.to_tensor(np.random. uniform(-1, 1, [1, 3, 32, 32]).astype(np. float32) )
321
+ x = paddle.uniform([1, 3, 32, 32], paddle.float32)
323
322
out = F.avg_pool2d(x,
324
323
kernel_size=2,
325
324
stride=2, padding=0)
@@ -439,9 +438,8 @@ def avg_pool3d(x,
439
438
.. code-block:: python
440
439
441
440
import paddle
442
- import numpy as np
443
441
444
- x = paddle.to_tensor(np.random. uniform(-1, 1, [1, 3, 32, 32, 32]).astype(np. float32) )
442
+ x = paddle.uniform([1, 3, 32, 32, 32], paddle.float32)
445
443
# avg pool3d
446
444
out = paddle.nn.functional.avg_pool3d(
447
445
x,
@@ -564,9 +562,8 @@ def max_pool1d(x,
564
562
565
563
import paddle
566
564
import paddle.nn.functional as F
567
- import numpy as np
568
565
569
- data = paddle.to_tensor(np.random. uniform(-1, 1, [1, 3, 32]).astype(np. float32) )
566
+ data = paddle.uniform([1, 3, 32], paddle.float32)
570
567
pool_out = F.max_pool1d(data, kernel_size=2, stride=2, padding=0)
571
568
# pool_out shape: [1, 3, 16]
572
569
pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_mask=True)
@@ -1275,8 +1272,10 @@ def adaptive_avg_pool1d(x, output_size, name=None):
1275
1272
x (Tensor): The input Tensor of pooling, which is a 3-D tensor with shape :math:`[N, C, L]`, where :math:`N` is batch size, :math:`C` is the number of channels and :math:`L` is the length of the feature. The data type is float32 or float64.
1276
1273
output_size (int): The target output size. Its data type must be int.
1277
1274
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
1275
+
1278
1276
Returns:
1279
1277
Tensor: The result of 1D adaptive average pooling. Its data type is same as input.
1278
+
1280
1279
Examples:
1281
1280
.. code-block:: python
1282
1281
@@ -1359,8 +1358,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
1359
1358
None by default.
1360
1359
Returns:
1361
1360
Tensor: The output tensor of avg adaptive pool2d result. The data type is same as input tensor.
1362
- Raises:
1363
- ValueError: If `data_format` is not "NCHW" or "NHWC".
1361
+
1364
1362
Examples:
1365
1363
.. code-block:: python
1366
1364
@@ -1499,12 +1497,10 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
1499
1497
# output[:, :, i, j, k] =
1500
1498
# avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
1501
1499
import paddle
1502
- import numpy as np
1503
- input_data = np.random.rand(2, 3, 8, 32, 32)
1504
- x = paddle.to_tensor(input_data)
1505
- # x.shape is [2, 3, 8, 32, 32]
1500
+
1501
+ input_data = paddle.randn(shape=(2, 3, 8, 32, 32))
1506
1502
out = paddle.nn.functional.adaptive_avg_pool3d(
1507
- x = x ,
1503
+ x = input_data ,
1508
1504
output_size=[3, 3, 3])
1509
1505
# out.shape is [2, 3, 3, 3, 3]
1510
1506
"""
@@ -1597,9 +1593,8 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
1597
1593
#
1598
1594
import paddle
1599
1595
import paddle.nn.functional as F
1600
- import numpy as np
1601
1596
1602
- data = paddle.to_tensor(np.random. uniform(-1, 1, [1, 3, 32]).astype(np. float32) )
1597
+ data = paddle.uniform([1, 3, 32], paddle.float32)
1603
1598
pool_out = F.adaptive_max_pool1d(data, output_size=16)
1604
1599
# pool_out shape: [1, 3, 16]
1605
1600
pool_out, indices = F.adaptive_max_pool1d(data, output_size=16, return_mask=True)
@@ -1678,13 +1673,10 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
1678
1673
# output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
1679
1674
#
1680
1675
import paddle
1681
- import numpy as np
1682
1676
1683
- input_data = np.random.rand(2, 3, 32, 32)
1684
- x = paddle.to_tensor(input_data)
1685
- # x.shape is [2, 3, 32, 32]
1677
+ input_data = paddle.randn(shape=(2, 3, 32, 32))
1686
1678
out = paddle.nn.functional.adaptive_max_pool2d(
1687
- x = x ,
1679
+ x = input_data ,
1688
1680
output_size=[3, 3])
1689
1681
# out.shape is [2, 3, 3, 3]
1690
1682
"""
@@ -1768,13 +1760,10 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
1768
1760
# output[:, :, i, j, k] = max(input[:, :, dstart: dend, hstart: hend, wstart: wend])
1769
1761
#
1770
1762
import paddle
1771
- import numpy as np
1772
1763
1773
- input_data = np.random.rand(2, 3, 8, 32, 32)
1774
- x = paddle.to_tensor(input_data)
1775
- # x.shape is [2, 3, 8, 32, 32]
1764
+ input_data = paddle.randn(shape=(2, 3, 8, 32, 32))
1776
1765
out = paddle.nn.functional.adaptive_max_pool3d(
1777
- x = x ,
1766
+ x = input_data ,
1778
1767
output_size=[3, 3, 3])
1779
1768
# out.shape is [2, 3, 3, 3, 3]
1780
1769
"""
0 commit comments