Commit 737fbdb

rm unittests eager guard tests part5 dataloader2dygraph_mnist (#48816)
1 parent f53e5a0 commit 737fbdb

10 files changed: +8 -91 lines
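
All ten files apply the same mechanical change: the `from paddle.fluid.framework import _test_eager_guard` import goes away, the wrapper tests that re-ran a body under the guard are deleted, and `func_test_*` helpers are renamed back to plain `test_*` methods so unittest still discovers them. Below is a minimal before/after sketch of that pattern using a hypothetical `TestExample` class rather than any file in this commit; the "before" half is kept in comments because it assumes a Paddle build that still ships `_test_eager_guard`.

import unittest

# --- Before (commented out; needs a Paddle release that still has the guard):
# the real checks live in func_test_main, and a wrapper runs them twice,
# once inside the eager guard and once in legacy dygraph mode.
#
# from paddle.fluid.framework import _test_eager_guard
#
# class TestExampleBefore(unittest.TestCase):
#     def func_test_main(self):
#         self.assertEqual(1 + 1, 2)   # the actual checks
#
#     def test_main(self):
#         with _test_eager_guard():    # run once in eager mode
#             self.func_test_main()
#         self.func_test_main()        # and once without the guard


# --- After: the guard and the wrapper are gone; the body itself is the
# discoverable test method.
class TestExampleAfter(unittest.TestCase):
    def test_main(self):
        self.assertEqual(1 + 1, 2)   # the actual checks


if __name__ == '__main__':
    unittest.main()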

python/paddle/fluid/tests/unittests/test_dataloader_dataset.py  (+3 -19)

@@ -17,12 +17,11 @@
 
 import paddle
 import paddle.vision.transforms as transforms
-from paddle.fluid.framework import _test_eager_guard
 from paddle.io import Dataset
 
 
 class TestDatasetAbstract(unittest.TestCase):
-    def func_test_main(self):
+    def test_main(self):
         dataset = Dataset()
         try:
             d = dataset[0]
@@ -36,11 +35,6 @@ def func_test_main(self):
         except NotImplementedError:
             pass
 
-    def test_main(self):
-        with _test_eager_guard():
-            self.func_test_main()
-        self.func_test_main()
-
 
 class TestDatasetWithDiffOutputPlace(unittest.TestCase):
     def get_dataloader(self, num_workers):
@@ -68,7 +62,7 @@ def run_check_on_cpu(self):
             self.assertTrue(label.place.is_cpu_place())
             break
 
-    def func_test_single_process(self):
+    def test_single_process(self):
         self.run_check_on_cpu()
         if paddle.is_compiled_with_cuda():
             # Get (image, label) tuple from MNIST dataset
@@ -80,12 +74,7 @@ def func_test_single_process(self):
                 self.assertTrue(label.place.is_cuda_pinned_place())
                 break
 
-    def test_single_process(self):
-        with _test_eager_guard():
-            self.func_test_single_process()
-        self.func_test_single_process()
-
-    def func_test_multi_process(self):
+    def test_multi_process(self):
         # DataLoader with multi-process mode is not supported on MacOs and Windows currently
         if sys.platform != 'darwin' and sys.platform != 'win32':
             self.run_check_on_cpu()
@@ -99,11 +88,6 @@ def func_test_multi_process(self):
                 self.assertTrue(label.place.is_cuda_pinned_place())
                 break
 
-    def test_multi_process(self):
-        with _test_eager_guard():
-            self.func_test_multi_process()
-        self.func_test_multi_process()
-
 
 if __name__ == '__main__':
     unittest.main()

python/paddle/fluid/tests/unittests/test_deform_conv2d.py  (-9)

@@ -19,7 +19,6 @@
 
 import paddle
 import paddle.nn.initializer as I
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestDeformConv2D(TestCase):
@@ -233,10 +232,6 @@ def test_identity(self):
             self.place = paddle.CUDAPlace(0)
         self._test_identity()
 
-    def test_identity_with_eager_guard(self):
-        with _test_eager_guard():
-            self.test_identity()
-
 
 class TestDeformConv2DFunctional(TestCase):
     batch_size = 4
@@ -544,10 +539,6 @@ def test_identity(self):
             self.place = paddle.CUDAPlace(0)
         self._test_identity()
 
-    def test_identity_with_eager_guard(self):
-        with _test_eager_guard():
-            self.test_identity()
-
 
 # testcases for DeformConv2D
 class TestDeformConv2DWithPadding(TestDeformConv2D):

python/paddle/fluid/tests/unittests/test_deformable_conv_op.py  (-9)

@@ -18,7 +18,6 @@
 from op_test import OpTest
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
 
@@ -442,10 +441,6 @@ def test_invalid_filter():
 
         self.assertRaises(ValueError, test_invalid_filter)
 
-    def test_error_with_eager_guard(self):
-        with _test_eager_guard():
-            self.test_error()
-
 
 class TestDeformConv2DAPI(unittest.TestCase):
     def test_api(self):
@@ -484,10 +479,6 @@ def test_deform_conv2d_v2():
 
         test_deform_conv2d_v2()
 
-    def test_api_with_eager_guard(self):
-        with _test_eager_guard():
-            self.test_api()
-
 
 if __name__ == '__main__':
     unittest.main()

python/paddle/fluid/tests/unittests/test_determinant_op.py  (-5)

@@ -18,7 +18,6 @@
 from op_test import OpTest
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
 
@@ -87,10 +86,6 @@ def test_api_dygraph(self):
         np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001)
         paddle.enable_static()
 
-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_api_dygraph()
-
 
 class TestSlogDeterminantOp(OpTest):
     def setUp(self):

python/paddle/fluid/tests/unittests/test_diag_v2.py  (-5)

@@ -20,7 +20,6 @@
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestDiagV2Op(OpTest):
@@ -281,8 +280,6 @@ def run_static(self, use_gpu=False):
     def test_cpu(self):
         paddle.disable_static(place=paddle.fluid.CPUPlace())
         self.run_imperative()
-        with _test_eager_guard():
-            self.run_imperative()
 
         paddle.enable_static()
 
@@ -295,8 +292,6 @@ def test_gpu(self):
 
         paddle.disable_static(place=paddle.fluid.CUDAPlace(0))
         self.run_imperative()
-        with _test_eager_guard():
-            self.run_imperative()
         paddle.enable_static()
 
         with fluid.program_guard(fluid.Program()):

python/paddle/fluid/tests/unittests/test_diagonal_op.py  (+5 -11)

@@ -18,7 +18,6 @@
 from op_test import OpTest
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
 
@@ -157,12 +156,11 @@ def test_api_dygraph(self):
 
     def test_api_eager(self):
         paddle.disable_static(self.place)
-        with _test_eager_guard():
-            x_tensor = paddle.to_tensor(self.x)
-            out = paddle.diagonal(x_tensor)
-            out2 = paddle.diagonal(x_tensor, offset=0, axis1=2, axis2=1)
-            out3 = paddle.diagonal(x_tensor, offset=1, axis1=0, axis2=1)
-            out4 = paddle.diagonal(x_tensor, offset=0, axis1=1, axis2=2)
+        x_tensor = paddle.to_tensor(self.x)
+        out = paddle.diagonal(x_tensor)
+        out2 = paddle.diagonal(x_tensor, offset=0, axis1=2, axis2=1)
+        out3 = paddle.diagonal(x_tensor, offset=1, axis1=0, axis2=1)
+        out4 = paddle.diagonal(x_tensor, offset=0, axis1=1, axis2=2)
         out_ref = np.diagonal(self.x)
         np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-08)
         out2_ref = np.diagonal(self.x, offset=0, axis1=2, axis2=1)
@@ -174,10 +172,6 @@ def test_api_eager(self):
 
         paddle.enable_static()
 
-    def test_api_eager_dygraph(self):
-        with _test_eager_guard():
-            self.test_api_dygraph()
-
 
 if __name__ == '__main__':
     unittest.main()

python/paddle/fluid/tests/unittests/test_diff_op.py  (-7)

@@ -19,7 +19,6 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestDiffOp(unittest.TestCase):
@@ -75,9 +74,6 @@ def func_dygraph(self):
         self.assertTrue((out.numpy() == self.output).all(), True)
 
     def test_dygraph(self):
-        with _test_eager_guard():
-            self.setUp()
-            self.func_dygraph()
         self.setUp()
         self.func_dygraph()
 
@@ -145,9 +141,6 @@ def func_grad(self):
             raise RuntimeError("Check Diff Gradient Failed")
 
     def test_grad(self):
-        with _test_eager_guard():
-            self.setUp()
-            self.func_grad()
         self.setUp()
         self.func_grad()
 

python/paddle/fluid/tests/unittests/test_digamma_op.py  (-12)

@@ -21,7 +21,6 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.static as static
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestDigammaOp(OpTest):
@@ -95,10 +94,6 @@ def test_in_dynamic_mode(self):
             res = paddle.digamma(input_t).numpy()
             np.testing.assert_allclose(res, sc_res, rtol=1e-05)
 
-    def test_in_eager_dynamic_mode(self):
-        with _test_eager_guard():
-            self.test_in_dynamic_mode()
-
     def test_name_argument(self):
         with static.program_guard(static.Program()):
             x = static.data(name="x", shape=self._shape, dtype=self.dtypes[0])
@@ -119,13 +114,6 @@ def test_dtype_error(self):
                 input_t = paddle.to_tensor(input)
                 res = paddle.digamma(input_t)
 
-        with self.assertRaises(RuntimeError):
-            with fluid.dygraph.guard():
-                with _test_eager_guard():
-                    input = np.random.random(self._shape).astype("int32")
-                    input_t = paddle.to_tensor(input)
-                    res = paddle.digamma(input_t)
-
 
 if __name__ == "__main__":
     unittest.main()

python/paddle/fluid/tests/unittests/test_dist_base.py  (-11)

@@ -30,7 +30,6 @@
 import paddle.fluid.dygraph as dygraph
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 from paddle.fluid import compiler
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.incubate.fleet.collective import DistributedStrategy, fleet
 
 RUN_STEP = 5
@@ -1718,16 +1717,6 @@ def check_with_place(
         log_name="",
     ):
         if self._dygraph and (self._gloo_mode or self._nccl2_mode):
-            need_envs.update({"FLAGS_enable_eager_mode": "1"})
-            with _test_eager_guard():
-                self.check_with_place_func(
-                    model_file=model_file,
-                    delta=delta,
-                    check_error_log=check_error_log,
-                    need_envs=need_envs,
-                    log_name=log_name,
-                )
-            need_envs.update({"FLAGS_enable_eager_mode": "0"})
             self.check_with_place_func(
                 model_file=model_file,
                 delta=delta,
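
This is the one file where the change is more than dropping a wrapper test: the dygraph branch of check_with_place also loses the extra pass that set FLAGS_enable_eager_mode to "1", re-ran check_with_place_func under _test_eager_guard, and then reset the flag to "0". A hedged, self-contained sketch of what that branch reduces to is below; the class, defaults, and the trailing keyword arguments (beyond the model_file/delta visible in the truncated hunk) are assumptions for illustration, not the real TestDistBase code.

# Sketch only: approximates the simplified dygraph branch after this commit.
class _DistCheckSketch:
    _dygraph = True
    _gloo_mode = True
    _nccl2_mode = False

    def check_with_place_func(self, **kwargs):
        # Stand-in for the real launcher; just shows it now runs once.
        print("single run with:", sorted(kwargs))

    def check_with_place(
        self,
        model_file,
        delta=1e-3,
        check_error_log=False,
        need_envs=None,
        log_name="",
    ):
        need_envs = need_envs or {}
        if self._dygraph and (self._gloo_mode or self._nccl2_mode):
            # Previously: toggle FLAGS_enable_eager_mode, run once inside
            # _test_eager_guard(), reset the flag, then run again.
            # Now a single run is enough.
            self.check_with_place_func(
                model_file=model_file,
                delta=delta,
                check_error_log=check_error_log,
                need_envs=need_envs,
                log_name=log_name,
            )


_DistCheckSketch().check_with_place("dummy_model.py")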

python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py  (-3)

@@ -18,7 +18,6 @@
 
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 
@@ -136,8 +135,6 @@ def func_mnist_fp16(self):
             print(loss.numpy())
 
     def test_mnist_fp16(self):
-        with _test_eager_guard():
-            self.func_mnist_fp16()
         self.func_mnist_fp16()
 
 