@@ -28,7 +28,7 @@


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
-    return np.maximum(np.minimum(x * slope + offset, 1.), 0.).astype(x.dtype)
+    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestMLUHardSigmoid(OpTest):
@@ -42,7 +42,7 @@ def setUp(self):

        x = np.random.uniform(-5, 5, [10, 12]).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
-        upper_threshold = (1. - self.offset) / self.slope
+        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
@@ -51,19 +51,19 @@ def setUp(self):

        out = ref_hardsigmoid(x, self.slope, self.offset)

-        self.attrs = {'slope': self.slope, 'offset': self.offset}
-        self.inputs = {'X': x}
-        self.outputs = {'Out': out}
+        self.attrs = {"slope": self.slope, "offset": self.offset}
+        self.inputs = {"X": x}
+        self.outputs = {"Out": out}

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
-        self.check_grad_with_place(self.place, ['X'], 'Out')
+        self.check_grad_with_place(self.place, ["X"], "Out")

    def set_mlu(self):
        self.__class__.use_custom_device = True
-        self.place = paddle.CustomPlace('CustomMLU', 0)
+        self.place = paddle.CustomPlace("CustomMLU", 0)

    def init_dtype(self):
        self.dtype = np.float32
@@ -89,13 +89,13 @@ class TestMLUHardSigmoidFp16(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()

-        self.place = paddle.CustomPlace('CustomMLU', 0)
+        self.place = paddle.CustomPlace("CustomMLU", 0)
        self.__class__.use_custom_device = True
        self.dtype = np.float32

        # float32
        self.float32_x = np.random.uniform(-5, 5, [10, 12]).astype(np.float32)
-        paddle.set_device('cpu')
+        paddle.set_device("cpu")
        data = paddle.to_tensor(self.float32_x, stop_gradient=True)
        self.float32_y = F.hardsigmoid(data)

@@ -105,23 +105,27 @@ def setUp(self):

    def test_check_output_and_grad_mlu(self):
        # mlu float16
-        paddle.set_device('CustomMLU')
+        paddle.set_device("CustomMLU")
        data = paddle.to_tensor(self.float16_x, stop_gradient=True)
        mlu_float16_y = F.hardsigmoid(data)

        cpu_diff_1 = np.divide(
            np.sum(np.abs(self.float32_y.numpy() - self.float16_y)),
-            np.sum(np.abs(self.float32_y.numpy())))
+            np.sum(np.abs(self.float32_y.numpy())),
+        )
        mlu_diff_1 = np.divide(
            np.sum(np.abs(self.float32_y.numpy() - mlu_float16_y.numpy())),
-            np.sum(np.abs(self.float32_y.numpy())))
+            np.sum(np.abs(self.float32_y.numpy())),
+        )

        cpu_diff_2 = np.divide(
            np.sum(np.square(self.float32_y.numpy() - self.float16_y)),
-            np.sum(np.square(self.float32_y.numpy())))
+            np.sum(np.square(self.float32_y.numpy())),
+        )
        mlu_diff_2 = np.divide(
            np.sum(np.square(self.float32_y.numpy() - mlu_float16_y.numpy())),
-            np.sum(np.square(self.float32_y.numpy())))
+            np.sum(np.square(self.float32_y.numpy())),
+        )
        assert mlu_diff_1 <= cpu_diff_1
        assert mlu_diff_2 <= cpu_diff_2

@@ -130,17 +134,17 @@ class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
-        self.place = paddle.CustomPlace('CustomMLU', 0)
+        self.place = paddle.CustomPlace("CustomMLU", 0)
        self.__class__.use_custom_device = True

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.static.data("X", self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            res = exe.run(feed={"X": self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-6)
@@ -159,16 +163,16 @@ def test_dygraph_api(self):
    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = fluid.layers.hard_sigmoid(x)
+            x = fluid.data("X", self.x_np.shape, self.x_np.dtype)
+            out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+            res = exe.run(feed={"X": self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
        np.testing.assert_allclose(out_ref, res[0])

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
-        out = paddle.fluid.layers.hard_sigmoid(x)
+        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy())
        paddle.enable_static()

@@ -177,14 +181,12 @@ def test_errors(self):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.hardsigmoid, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.fluid.data(
-            name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name="x_int32", shape=[12, 10], dtype="int32")
        self.assertRaises(TypeError, F.hardsigmoid, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.fluid.data(
-            name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name="x_fp16", shape=[12, 10], dtype="float16")
        F.hardsigmoid(x_fp16)


-if __name__ == '__main__':
+if __name__ == "__main__":
    unittest.main()