@@ -90,6 +90,72 @@ def init_shape(self):
        self.shape = []


+class TestExpPrimFp32(OpTest):
+    def setUp(self):
+        self.op_type = "exp"
+        self.prim_op_type = "prim"
+        self.init_dtype()
+        self.init_shape()
+        self.python_api = paddle.exp
+
+        np.random.seed(2049)
+        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
+        out = np.exp(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+        self.skip_cinn()
+        self.set_only_prim()
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', check_prim=True)
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def init_shape(self):
+        self.shape = [12, 17]
+
+    def skip_cinn(self):
+        self.enable_cinn = False
+
+    def set_only_prim(self):
+        pass
+
+
+class TestExpPrimFp64(TestExpPrimFp32):
+    def init_dtype(self):
+        self.dtype = np.float64
+
+
+class TestExpPrimFp16(TestExpPrimFp32):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def set_only_prim(self):
+        self.only_prim = True
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', check_prim=True)
+
+    def skip_cinn(self):
+        self.enable_cinn = False
+
+
+class TestExpPrim_ZeroDim(TestExpPrimFp32):
+    def init_shape(self):
+        self.shape = []
+
+    def skip_cinn(self):
+        self.enable_cinn = False
+
+
class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
@@ -167,6 +233,8 @@ def test_errors(self):
class TestParameter:
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
+            if paddle.fluid.framework.in_dygraph_mode():
+                paddle.enable_static()
            np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
            data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
@@ -1062,6 +1130,7 @@ def test_errors(self):
class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
+        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
@@ -1072,7 +1141,9 @@ def setUp(self):

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
+        self.enable_cinn = False

+    # TODO(wanghao107) add prim test
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
@@ -1082,17 +1153,58 @@ def test_check_output(self):
        self.check_output(check_eager=True)


+class TestSqrtPrimFp32(TestActivation):
+    def setUp(self):
+        self.op_type = "sqrt"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.sqrt
+        self.init_dtype()
+        self.init_shape()
+        np.random.seed(1023)
+        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
+        out = np.sqrt(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+        self.enable_cinn = False
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+
+    def test_check_output(self):
+        self.check_output(check_eager=True)
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []


+class TestSqrtPrim_ZeroDim(TestSqrt):
+    def init_shape(self):
+        self.shape = []
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', check_prim=True)
+
+
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
+        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
@@ -1105,6 +1217,8 @@ def setUp(self):
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}
+        # TODO(wanghao107): add prim test
+        self.enable_cinn = False

    def init_dtype(self):
        self.dtype = np.uint16
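For context (not part of the diff): the new prim classes are ordinary OpTest/unittest cases, so they can be collected and run on their own once the PR is applied. A minimal sketch, assuming this file is importable as test_activation_op and a Paddle build with the prim test infrastructure is installed:

# Minimal sketch (assumption: the test file is importable as `test_activation_op`
# and Paddle is built with prim/CINN test support).
import unittest

import test_activation_op as m  # hypothetical module name for this file

suite = unittest.TestSuite()
for case in (m.TestExpPrimFp32, m.TestExpPrimFp16, m.TestSqrtPrimFp32):
    # Each class is a regular unittest.TestCase via OpTest, so the default
    # loader can collect its test_* methods.
    suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(case))
unittest.TextTestRunner(verbosity=2).run(suite)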