Commit 9466e95

reduce unittest time by renaming testcuda to has_cuda
test=develop

1 parent a770ce0
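The rename is not cosmetic: Python's unittest loader collects every method whose name starts with the default prefix test as a separate test case, so a helper named testcuda or testcudnn was being discovered and run as an extra test in each of these classes. A minimal self-contained sketch of that discovery behavior (the class and method bodies here are illustrative, not taken from the Paddle suite):

    import unittest

    class DemoTest(unittest.TestCase):
        # Name starts with the default 'test' prefix, so the loader
        # collects it as a test case even though it is only a helper.
        def testcuda(self):
            return True

        # Renamed helper: no 'test' prefix, so discovery skips it.
        def has_cuda(self):
            return True

        def test_check_output(self):
            self.assertTrue(self.has_cuda())

    # getTestCaseNames shows what unittest would actually run:
    print(unittest.TestLoader().getTestCaseNames(DemoTest))
    # -> ['test_check_output', 'testcuda']  (the helper counts as a test)

After the rename, only the real test_* methods are collected, which is where the saved unittest time comes from.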

File tree

5 files changed: +15 -15 lines changed

python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py

Lines changed: 2 additions & 2 deletions
@@ -90,11 +90,11 @@ def setUp(self):
         self.set_outputs()

-    def testcuda(self):
+    def has_cuda(self):
         return core.is_compiled_with_cuda()

     def test_check_output(self):
-        if self.testcuda():
+        if self.has_cuda():
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
         else:

python/paddle/fluid/tests/unittests/test_conv3d_op.py

Lines changed: 5 additions & 5 deletions
@@ -108,24 +108,24 @@ def setUp(self):
         }
         self.outputs = {'Output': output}

-    def testcudnn(self):
+    def has_cudnn(self):
         return core.is_compiled_with_cuda() and self.use_cudnn

     def test_check_output(self):
-        place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace()
+        place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
         self.check_output_with_place(place, atol=1e-5)

     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace()
+        place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
         self.check_grad_with_place(
             place, {'Input', 'Filter'}, 'Output', max_relative_error=0.03)

     def test_check_grad_no_filter(self):
         if self.dtype == np.float16:
             return
-        place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace()
+        place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
         self.check_grad_with_place(
             place, ['Input'],
             'Output',
@@ -135,7 +135,7 @@ def test_check_grad_no_filter(self):
     def test_check_grad_no_input(self):
         if self.dtype == np.float16:
             return
-        place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace()
+        place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
         self.check_grad_with_place(
             place, ['Input'],
             'Output',

python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py

Lines changed: 2 additions & 2 deletions
@@ -171,7 +171,7 @@ def setUp(self):
         }

     def test_output_with_place(self):
-        if self.testcuda():
+        if self.has_cuda():
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)

@@ -184,7 +184,7 @@ def test_grad_with_place(self):
             ['Out', 'last_h', 'last_c'],
             max_relative_error=0.02)

-    def testcuda(self):
+    def has_cuda(self):
         return core.is_compiled_with_cuda()

python/paddle/fluid/tests/unittests/test_pool2d_op.py

Lines changed: 3 additions & 3 deletions
@@ -148,11 +148,11 @@ def setUp(self):

         self.outputs = {'Out': output}

-    def testcudnn(self):
+    def has_cudnn(self):
         return core.is_compiled_with_cuda() and self.use_cudnn

     def test_check_output(self):
-        if self.testcudnn():
+        if self.has_cudnn():
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
         else:
@@ -161,7 +161,7 @@ def test_check_output(self):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        if self.testcudnn() and self.pool_type != "max":
+        if self.has_cudnn() and self.pool_type != "max":
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
                 place, set(['X']), 'Out', max_relative_error=0.07)

python/paddle/fluid/tests/unittests/test_pool3d_op.py

Lines changed: 3 additions & 3 deletions
@@ -172,11 +172,11 @@ def setUp(self):

         self.outputs = {'Out': output}

-    def testcudnn(self):
+    def has_cudnn(self):
         return core.is_compiled_with_cuda() and self.use_cudnn

     def test_check_output(self):
-        if self.testcudnn():
+        if self.has_cudnn():
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
         else:
@@ -185,7 +185,7 @@ def test_check_output(self):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        if self.testcudnn() and self.pool_type != "max":
+        if self.has_cudnn() and self.pool_type != "max":
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
                 place, set(['X']), 'Out', max_relative_error=0.07)
