diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py
index 310cc07d5f5b4..186d2bfe79505 100755
--- a/python/paddle/nn/functional/pooling.py
+++ b/python/paddle/nn/functional/pooling.py
@@ -1818,7 +1818,7 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
     pool_size = [1] + convert_to_list(output_size, 1, 'pool_size')

     x = unsqueeze(x, [2])
-    if in_dygraph_mode():
+    if in_dynamic_or_pir_mode():
         pool_out = _C_ops.max_pool2d_with_index(
             x, pool_size, [1, 1], [0, 0], False, True
         )
@@ -1912,7 +1912,7 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
             output_size[0] = in_h
         if output_size[1] is None:
             output_size[1] = in_w
-    if in_dygraph_mode():
+    if in_dynamic_or_pir_mode():
         pool_out = _C_ops.max_pool2d_with_index(
             x, output_size, [1, 1], [0, 0], False, True
         )
@@ -2003,7 +2003,7 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
         if output_size[2] is None:
             output_size[2] = in_w

-    if in_dygraph_mode():
+    if in_dynamic_or_pir_mode():
         # By default, strides is [1,1,1] and paddings is [0, 0, 0]
         pool_out = _C_ops.max_pool3d_with_index(
             x, output_size, [1, 1, 1], [0, 0, 0], False, True
diff --git a/test/deprecated/legacy_test/test_adaptive_max_pool1d.py b/test/legacy_test/test_adaptive_max_pool1d.py
similarity index 100%
rename from test/deprecated/legacy_test/test_adaptive_max_pool1d.py
rename to test/legacy_test/test_adaptive_max_pool1d.py
diff --git a/test/deprecated/legacy_test/test_adaptive_max_pool2d.py b/test/legacy_test/test_adaptive_max_pool2d.py
similarity index 85%
rename from test/deprecated/legacy_test/test_adaptive_max_pool2d.py
rename to test/legacy_test/test_adaptive_max_pool2d.py
index 3a0579cbcc1fb..d04a6ed973765 100644
--- a/test/deprecated/legacy_test/test_adaptive_max_pool2d.py
+++ b/test/legacy_test/test_adaptive_max_pool2d.py
@@ -279,46 +279,53 @@ def test_static_graph(self):
         for use_cuda in (
             [False, True] if core.is_compiled_with_cuda() else [False]
         ):
-            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
-            paddle.enable_static()
-            x = paddle.static.data(
-                name="x", shape=[2, 3, 7, 7], dtype="float32"
-            )
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
+                paddle.enable_static()
+                x = paddle.static.data(
+                    name="x", shape=[2, 3, 7, 7], dtype="float32"
+                )

-            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])
-            out_1 = adaptive_max_pool(x=x)
+                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
+                    output_size=[3, 3]
+                )
+                out_1 = adaptive_max_pool(x=x)

-            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)
-            out_2 = adaptive_max_pool(x=x)
+                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)
+                out_2 = adaptive_max_pool(x=x)

-            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[2, 5])
-            out_3 = adaptive_max_pool(x=x)
+                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
+                    output_size=[2, 5]
+                )
+                out_3 = adaptive_max_pool(x=x)

-            # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
-            #     output_size=[3, 3], data_format="NHWC")
-            # out_4 = adaptive_max_pool(x=x)
+                # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
+                #     output_size=[3, 3], data_format="NHWC")
+                # out_4 = adaptive_max_pool(x=x)

-            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
-                output_size=[None, 3]
-            )
-            out_5 = adaptive_max_pool(x=x)
+                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
+                    output_size=[None, 3]
+                )
+                out_5 = adaptive_max_pool(x=x)

-            exe = paddle.static.Executor(place=place)
-            [res_1, res_2, res_3, res_5] = exe.run(
-                base.default_main_program(),
-                feed={"x": self.x_np},
-                fetch_list=[out_1, out_2, out_3, out_5],
-            )
+                exe = paddle.static.Executor(place=place)
+                [res_1, res_2, res_3, res_5] = exe.run(
+                    base.default_main_program(),
+                    feed={"x": self.x_np},
+                    fetch_list=[out_1, out_2, out_3, out_5],
+                )

-            np.testing.assert_allclose(res_1, self.res_1_np)
+                np.testing.assert_allclose(res_1, self.res_1_np)

-            np.testing.assert_allclose(res_2, self.res_2_np)
+                np.testing.assert_allclose(res_2, self.res_2_np)

-            np.testing.assert_allclose(res_3, self.res_3_np)
+                np.testing.assert_allclose(res_3, self.res_3_np)

-            # np.testing.assert_allclose(res_4, self.res_4_np)
+                # np.testing.assert_allclose(res_4, self.res_4_np)

-            np.testing.assert_allclose(res_5, self.res_5_np)
+                np.testing.assert_allclose(res_5, self.res_5_np)

     def test_dynamic_graph(self):
         for use_cuda in (
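
A quick way to exercise the dispatch change above is a smoke test of adaptive_max_pool1d; this is a hypothetical sketch, not part of the patch, and assumes a Paddle build where the PIR mode covered by in_dynamic_or_pir_mode() is available:

    import paddle
    import paddle.nn.functional as F

    # adaptive_max_pool1d unsqueezes the input to NCHW and calls
    # _C_ops.max_pool2d_with_index; with this patch, that fast path is
    # taken under PIR as well as under dygraph.
    x = paddle.uniform([1, 3, 32], dtype='float32')
    out = F.adaptive_max_pool1d(x, output_size=16)
    print(out.shape)  # [1, 3, 16]

    # return_mask=True goes through the same kernel and also returns
    # the argmax indices of each pooled window.
    out, mask = F.adaptive_max_pool1d(x, output_size=16, return_mask=True)
    print(mask.shape)  # [1, 3, 16]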