Commit b7d1701

[MLU] fix custom_device uts, remove fluid apis (#455)
1 parent 717aeca commit b7d1701

11 files changed (+455, -445 lines)
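Both test files below follow the same pattern: calls into the deprecated `paddle.fluid.layers` namespace are replaced with their `paddle.nn.functional` equivalents, quoting is normalized to double quotes, and long call sites are reflowed. A minimal sketch of the two API swaps exercised by these tests (the tensor values here are only illustrative):

import numpy as np
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor(np.random.uniform(-5, 5, [4]).astype("float32"))
label = paddle.zeros_like(x)

# formerly fluid.layers.hard_sigmoid(x); the updated tests call:
y = F.hardsigmoid(x, slope=0.2, offset=0.5)

# formerly fluid.layers.huber_loss(x, label, delta); the updated tests use:
loss = F.smooth_l1_loss(x, label, delta=1.0)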

backends/mlu/tests/unittests/test_hard_sigmoid_op_mlu.py

Lines changed: 28 additions & 26 deletions
@@ -28,7 +28,7 @@
 
 
 def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
-    return np.maximum(np.minimum(x * slope + offset, 1.), 0.).astype(x.dtype)
+    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)
 
 
 class TestMLUHardSigmoid(OpTest):
@@ -42,7 +42,7 @@ def setUp(self):
 
         x = np.random.uniform(-5, 5, [10, 12]).astype(self.dtype)
         lower_threshold = -self.offset / self.slope
-        upper_threshold = (1. - self.offset) / self.slope
+        upper_threshold = (1.0 - self.offset) / self.slope
 
         # Same reason as TestAbs
         delta = 0.005
@@ -51,19 +51,19 @@ def setUp(self):
 
         out = ref_hardsigmoid(x, self.slope, self.offset)
 
-        self.attrs = {'slope': self.slope, 'offset': self.offset}
-        self.inputs = {'X': x}
-        self.outputs = {'Out': out}
+        self.attrs = {"slope": self.slope, "offset": self.offset}
+        self.inputs = {"X": x}
+        self.outputs = {"Out": out}
 
     def test_check_output(self):
         self.check_output_with_place(self.place)
 
     def test_check_grad(self):
-        self.check_grad_with_place(self.place, ['X'], 'Out')
+        self.check_grad_with_place(self.place, ["X"], "Out")
 
     def set_mlu(self):
         self.__class__.use_custom_device = True
-        self.place = paddle.CustomPlace('CustomMLU', 0)
+        self.place = paddle.CustomPlace("CustomMLU", 0)
 
     def init_dtype(self):
         self.dtype = np.float32
@@ -89,13 +89,13 @@ class TestMLUHardSigmoidFp16(unittest.TestCase):
     def setUp(self):
         paddle.disable_static()
 
-        self.place = paddle.CustomPlace('CustomMLU', 0)
+        self.place = paddle.CustomPlace("CustomMLU", 0)
         self.__class__.use_custom_device = True
         self.dtype = np.float32
 
         # float32
         self.float32_x = np.random.uniform(-5, 5, [10, 12]).astype(np.float32)
-        paddle.set_device('cpu')
+        paddle.set_device("cpu")
         data = paddle.to_tensor(self.float32_x, stop_gradient=True)
         self.float32_y = F.hardsigmoid(data)
 
@@ -105,23 +105,27 @@ def setUp(self):
 
     def test_check_output_and_grad_mlu(self):
         # mlu float16
-        paddle.set_device('CustomMLU')
+        paddle.set_device("CustomMLU")
         data = paddle.to_tensor(self.float16_x, stop_gradient=True)
         mlu_float16_y = F.hardsigmoid(data)
 
         cpu_diff_1 = np.divide(
             np.sum(np.abs(self.float32_y.numpy() - self.float16_y)),
-            np.sum(np.abs(self.float32_y.numpy())))
+            np.sum(np.abs(self.float32_y.numpy())),
+        )
         mlu_diff_1 = np.divide(
             np.sum(np.abs(self.float32_y.numpy() - mlu_float16_y.numpy())),
-            np.sum(np.abs(self.float32_y.numpy())))
+            np.sum(np.abs(self.float32_y.numpy())),
+        )
 
         cpu_diff_2 = np.divide(
             np.sum(np.square(self.float32_y.numpy() - self.float16_y)),
-            np.sum(np.square(self.float32_y.numpy())))
+            np.sum(np.square(self.float32_y.numpy())),
+        )
         mlu_diff_2 = np.divide(
             np.sum(np.square(self.float32_y.numpy() - mlu_float16_y.numpy())),
-            np.sum(np.square(self.float32_y.numpy())))
+            np.sum(np.square(self.float32_y.numpy())),
+        )
         assert mlu_diff_1 <= cpu_diff_1
         assert mlu_diff_2 <= cpu_diff_2
 
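The hunk above only reflows the trailing parentheses; the acceptance criterion is unchanged: the MLU float16 result may not drift further from the float32 reference than the CPU float16 result does, measured by summed L1 and L2 relative errors. A standalone restatement of that metric (plain NumPy, names are illustrative):

import numpy as np

def relative_errors(reference, candidate):
    # summed L1 and L2 relative error of candidate against reference
    l1 = np.sum(np.abs(reference - candidate)) / np.sum(np.abs(reference))
    l2 = np.sum(np.square(reference - candidate)) / np.sum(np.square(reference))
    return l1, l2

ref = np.linspace(-1.0, 1.0, 8, dtype=np.float32)
fp16_roundtrip = ref.astype(np.float16).astype(np.float32)
l1, l2 = relative_errors(ref, fp16_roundtrip)
assert l1 < 1e-2 and l2 < 1e-2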
@@ -130,17 +134,17 @@ class TestHardsigmoidAPI(unittest.TestCase):
     # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
     def setUp(self):
         self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
-        self.place = paddle.CustomPlace('CustomMLU', 0)
+        self.place = paddle.CustomPlace("CustomMLU", 0)
         self.__class__.use_custom_device = True
 
     def test_static_api(self):
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.static.data("X", self.x_np.shape, self.x_np.dtype)
             out1 = F.hardsigmoid(x)
             m = paddle.nn.Hardsigmoid()
             out2 = m(x)
             exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            res = exe.run(feed={"X": self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_hardsigmoid(self.x_np)
         for r in res:
             np.testing.assert_allclose(out_ref, r, rtol=1e-6)
@@ -159,16 +163,16 @@ def test_dygraph_api(self):
     def test_fluid_api(self):
         paddle.enable_static()
         with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = fluid.layers.hard_sigmoid(x)
+            x = fluid.data("X", self.x_np.shape, self.x_np.dtype)
+            out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
             exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+            res = exe.run(feed={"X": self.x_np}, fetch_list=[out])
         out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
         np.testing.assert_allclose(out_ref, res[0])
 
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
-        out = paddle.fluid.layers.hard_sigmoid(x)
+        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
         np.testing.assert_allclose(out_ref, out.numpy())
         paddle.enable_static()
 
@@ -177,14 +181,12 @@ def test_errors(self):
             # The input type must be Variable.
             self.assertRaises(TypeError, F.hardsigmoid, 1)
             # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[12, 10], dtype='int32')
+            x_int32 = paddle.fluid.data(name="x_int32", shape=[12, 10], dtype="int32")
             self.assertRaises(TypeError, F.hardsigmoid, x_int32)
             # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[12, 10], dtype='float16')
+            x_fp16 = paddle.fluid.data(name="x_fp16", shape=[12, 10], dtype="float16")
             F.hardsigmoid(x_fp16)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
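For context on the thresholds computed in `setUp` above: `ref_hardsigmoid` is linear between `-offset / slope` and `(1 - offset) / slope` and saturates at 0 and 1 outside that band, which is presumably why the test computes those bounds together with a small `delta` (the "Same reason as TestAbs" comment) before checking gradients. A quick standalone check with the default slope and offset, not part of the test file:

import numpy as np

def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)

slope, offset = 0.166666666666667, 0.5
lower, upper = -offset / slope, (1.0 - offset) / slope  # approximately -3.0 and 3.0
x = np.array([lower - 1.0, lower, 0.0, upper, upper + 1.0], dtype=np.float32)
print(ref_hardsigmoid(x))  # -> [0., 0., 0.5, 1., 1.]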

backends/mlu/tests/unittests/test_huber_loss_op_mlu.py

Lines changed: 46 additions & 40 deletions
@@ -19,7 +19,7 @@
 from tests.op_test import OpTest
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid import Program, program_guard
 
 paddle.enable_static()
 
@@ -33,31 +33,29 @@ def huber_loss_forward(val, delta):
 
 
 class TestHuberLossOp(OpTest):
-
     def setUp(self):
-        self.op_type = 'huber_loss'
+        self.op_type = "huber_loss"
         self.set_mlu()
-        self.python_api = paddle.fluid.layers.huber_loss
+        self.python_api = paddle.nn.functional.smooth_l1_loss
         self.python_out_sig = ["Out"]
         self.delta = 1.0
         self.init_input()
         shape = self.set_shape()
-        residual = self.inputs['Y'] - self.inputs['X']
-        loss = np.vectorize(huber_loss_forward)(residual,
-                                                self.delta).astype('float32')
-        self.attrs = {'delta': self.delta}
-        self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)}
+        residual = self.inputs["Y"] - self.inputs["X"]
+        loss = np.vectorize(huber_loss_forward)(residual, self.delta).astype("float32")
+        self.attrs = {"delta": self.delta}
+        self.outputs = {"Residual": residual, "Out": loss.reshape(shape)}
 
     def init_input(self):
         shape = self.set_shape()
         self.inputs = {
-            'X': np.random.uniform(0, 1., shape).astype('float32'),
-            'Y': np.random.uniform(0, 1., shape).astype('float32'),
+            "X": np.random.uniform(0, 1.0, shape).astype("float32"),
+            "Y": np.random.uniform(0, 1.0, shape).astype("float32"),
         }
 
     def set_mlu(self):
         self.__class__.use_custom_device = True
-        self.place = paddle.CustomPlace('CustomMLU', 0)
+        self.place = paddle.CustomPlace("CustomMLU", 0)
 
     def set_shape(self):
         return (100, 1)
@@ -66,60 +64,68 @@ def test_check_output(self):
         self.check_output_with_place(self.place, atol=1e-3)
 
     def test_check_grad_normal(self):
-        self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
+        self.check_grad_with_place(self.place, ["X", "Y"], "Out")
 
     def test_check_grad_ingore_x(self):
-        self.check_grad_with_place(self.place, ['Y'],
-                                   'Out',
-                                   max_relative_error=0.008,
-                                   no_grad_set=set("residual"))
+        self.check_grad_with_place(
+            self.place,
+            ["Y"],
+            "Out",
+            max_relative_error=0.008,
+            no_grad_set=set("residual"),
+        )
 
     def test_check_grad_ingore_y(self):
-        self.check_grad_with_place(self.place, ['X'],
-                                   'Out',
-                                   max_relative_error=0.008,
-                                   no_grad_set=set('residual'))
+        self.check_grad_with_place(
+            self.place,
+            ["X"],
+            "Out",
+            max_relative_error=0.008,
+            no_grad_set=set("residual"),
+        )
 
 
 def TestHuberLossOp1(TestHuberLossOp):
-
     def set_shape(self):
-        return (64)
+        return 64
 
 
 def TestHuberLossOp2(TestHuberLossOp):
-
     def set_shape(self):
         return (6, 6)
 
 
 def TestHuberLossOp3(TestHuberLossOp):
-
     def set_shape(self):
         return (6, 6, 1)
 
 
 class TestHuberLossOpError(unittest.TestCase):
-
     def test_errors(self):
         with program_guard(Program(), Program()):
             # the input and label must be Variable
             xw = np.random.random((6, 6)).astype("float32")
-            xr = fluid.data(name='xr', shape=[None, 6], dtype="float32")
+            xr = fluid.data(name="xr", shape=[None, 6], dtype="float32")
             lw = np.random.random((6, 6)).astype("float32")
-            lr = fluid.data(name='lr', shape=[None, 6], dtype="float32")
+            lr = fluid.data(name="lr", shape=[None, 6], dtype="float32")
             delta = 1.0
-            self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw, delta)
-            self.assertRaises(TypeError, fluid.layers.huber_loss, xw, lr, delta)
+            self.assertRaises(
+                TypeError, paddle.nn.functional.smooth_l1_loss, xr, lw, delta
+            )
+            self.assertRaises(
+                TypeError, paddle.nn.functional.smooth_l1_loss, xw, lr, delta
+            )
 
             # the dtype of input and label must be float32 or float64
-            xw2 = fluid.data(name='xw2', shape=[None, 6], dtype="int32")
-            lw2 = fluid.data(name='lw2', shape=[None, 6], dtype="int32")
-            self.assertRaises(TypeError, fluid.layers.huber_loss, xw2, lr,
-                              delta)
-            self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw2,
-                              delta)
-
-
-if __name__ == '__main__':
-    unittest.main()
+            xw2 = fluid.data(name="xw2", shape=[None, 6], dtype="int32")
+            lw2 = fluid.data(name="lw2", shape=[None, 6], dtype="int32")
+            self.assertRaises(
+                TypeError, paddle.nn.functional.smooth_l1_loss, xw2, lr, delta
+            )
+            self.assertRaises(
+                TypeError, paddle.nn.functional.smooth_l1_loss, xr, lw2, delta
+            )
+
+
+if __name__ == "__main__":
+    unittest.main()
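`huber_loss_forward` itself lies outside the changed lines; the sketch below assumes the standard piecewise Huber definition, which for `delta = 1.0` (the value set in `setUp`) agrees elementwise with `paddle.nn.functional.smooth_l1_loss` before reduction, presumably why the latter can stand in as the new `python_api`:

import numpy as np

def huber_loss_forward(val, delta):
    # assumed standard Huber loss: quadratic near zero, linear in the tails
    if abs(val) <= delta:
        return 0.5 * val * val
    return delta * (abs(val) - 0.5 * delta)

residual = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
loss = np.vectorize(huber_loss_forward)(residual, 1.0).astype("float32")
print(loss)  # -> [1.5, 0.125, 0., 0.125, 1.5]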
