import unittest
import numpy as np

-from op_test import OpTest, convert_float_to_uint16
+from op_test import OpTest
import paddle
import paddle.fluid as fluid
-from paddle.fluid import compiler
from paddle.static import Program, program_guard
-import paddle.fluid.core as core


def get_places(self):
-    return [paddle.CustomPlace('custom_cpu', 0)]
+    return [paddle.CustomPlace("custom_cpu", 0)]


OpTest._get_places = get_places
@@ -41,7 +39,7 @@ def setUp(self):
        self.attrs = {"shape": self.new_shape}
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.infered_shape),
-            'XShape': np.random.random(self.ori_shape).astype("float32")
+            "XShape": np.random.random(self.ori_shape).astype("float32"),
        }

    def init_data(self):
@@ -50,7 +48,7 @@ def init_data(self):
        self.infered_shape = (12, 10)

    def test_check_output(self):
-        self.check_output(no_check_set=['XShape'])
+        self.check_output(no_check_set=["XShape"])

    def test_check_grad(self):
        self.check_grad(["X"], "Out")
@@ -78,13 +76,12 @@ def setUp(self):

        self.inputs = {
            "X": np.random.random(self.ori_shape).astype("float32"),
-            "Shape": np.array(
-                self.actual_shape, dtype="int32")
+            "Shape": np.array(self.actual_shape, dtype="int32"),
        }
        self.attrs = {"shape": self.new_shape}
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.actual_shape),
-            'XShape': np.random.random(self.ori_shape).astype("float32")
+            "XShape": np.random.random(self.ori_shape).astype("float32"),
        }

    def init_data(self):
@@ -93,7 +90,7 @@ def init_data(self):
        self.actual_shape = (2, 3, 20)

    def test_check_output(self):
-        self.check_output(no_check_set=['XShape'])
+        self.check_output(no_check_set=["XShape"])

    def test_check_grad(self):
        self.check_grad(["X"], "Out")
@@ -107,17 +104,16 @@ def setUp(self):

        shape_tensor = []
        for index, ele in enumerate(self.new_shape):
-            shape_tensor.append(("x" + str(index), np.ones(
-                (1)).astype('int32') * ele))
+            shape_tensor.append(("x" + str(index), np.ones((1)).astype("int32") * ele))

        self.inputs = {
            "X": np.random.random(self.ori_shape).astype("float32"),
-            'ShapeTensor': shape_tensor
+            "ShapeTensor": shape_tensor,
        }
-        self.attrs = {'shape': self.shape}
+        self.attrs = {"shape": self.shape}
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.infered_shape),
-            'XShape': np.random.random(self.ori_shape).astype("float32")
+            "XShape": np.random.random(self.ori_shape).astype("float32"),
        }

    def init_data(self):
@@ -127,7 +123,7 @@ def init_data(self):
        self.shape = (-1, -1)

    def test_check_output(self):
-        self.check_output(no_check_set=['XShape'])
+        self.check_output(no_check_set=["XShape"])

    def test_check_grad(self):
        self.check_grad(["X"], "Out")
@@ -157,13 +153,12 @@ def setUp(self):

        self.inputs = {
            "X": np.random.random(self.ori_shape).astype("float32"),
-            "Shape": np.array(
-                self.new_shape, dtype="int32")
+            "Shape": np.array(self.new_shape, dtype="int32"),
        }
        self.attrs = {}
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.infered_shape),
-            'XShape': np.random.random(self.ori_shape).astype("float32")
+            "XShape": np.random.random(self.ori_shape).astype("float32"),
        }

    def init_data(self):
@@ -172,7 +167,7 @@ def init_data(self):
        self.infered_shape = (10, 10)

    def test_check_output(self):
-        self.check_output(no_check_set=['XShape'])
+        self.check_output(no_check_set=["XShape"])

    def test_check_grad(self):
        self.check_grad(["X"], "Out")
@@ -198,14 +193,11 @@ class TestReshapeOpBool(TestReshapeOp):
    def setUp(self):
        self.init_data()
        self.op_type = "reshape2"
-        self.inputs = {
-            "X": np.random.choice(
-                [True, False], size=self.ori_shape)
-        }
+        self.inputs = {"X": np.random.choice([True, False], size=self.ori_shape)}
        self.attrs = {"shape": self.new_shape}
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.infered_shape),
-            'XShape': np.random.random(self.ori_shape).astype("float32")
+            "XShape": np.random.random(self.ori_shape).astype("float32"),
        }

    def test_check_grad(self):
@@ -215,7 +207,7 @@ def test_check_grad(self):
# Test python API
class TestReshapeAPI(unittest.TestCase):
    def _set_paddle_api(self):
-        self.fill_constant = paddle.fluid.layers.fill_constant
+        self.fill_constant = paddle.tensor.fill_constant
        self.data = paddle.static.data
        self.to_tensor = paddle.to_tensor
        self._executed_api()
@@ -224,7 +216,7 @@ def _executed_api(self):
        self.reshape = paddle.reshape

    def _set_fluid_api(self):
-        self.fill_constant = fluid.layers.fill_constant
+        self.fill_constant = paddle.tensor.fill_constant
        self.data = paddle.static.data
        self.reshape = fluid.layers.reshape
@@ -243,21 +235,20 @@ def _test_api(self):
        out_1 = self.reshape(x, shape)

        # situation 2: have shape(list, no tensor), have actual shape(Tensor)
-        out_2 = fluid.layers.reshape(
-            x, shape=shape, actual_shape=actual_shape)
+        out_2 = fluid.layers.reshape(x, shape=shape, actual_shape=actual_shape)

        # Situation 3: have shape(list, have tensor), no actual shape(Tensor)
        out_3 = self.reshape(x, shape=[positive_five, 10])

        # Situation 4: have shape(Tensor), no actual shape(Tensor)
        out_4 = self.reshape(x, shape=actual_shape)

-        exe = paddle.static.Executor(place=paddle.CustomPlace('custom_cpu', 0))
+        exe = paddle.static.Executor(place=paddle.CustomPlace("custom_cpu", 0))
        res_1, res_2, res_3, res_4 = exe.run(
            main_prog,
-            feed={"x": input,
-                  "shape": np.array([2, 5, 5]).astype("int32")},
-            fetch_list=[out_1, out_2, out_3, out_4])
+            feed={"x": input, "shape": np.array([2, 5, 5]).astype("int32")},
+            fetch_list=[out_1, out_2, out_3, out_4],
+        )

        assert np.array_equal(res_1, input.reshape(shape))
        assert np.array_equal(res_2, input.reshape(shape))
@@ -276,7 +267,7 @@ def test_imperative(self):
        self._set_paddle_api()
        input = np.random.random([2, 25]).astype("float32")
        shape = [2, 5, 5]
-        with fluid.dygraph.guard(paddle.CustomPlace('custom_cpu', 0)):
+        with fluid.dygraph.guard(paddle.CustomPlace("custom_cpu", 0)):
            x = self.to_tensor(input)
            positive_five = self.fill_constant([1], "int32", 5)
@@ -300,7 +291,7 @@ def test_imperative(self):
        self._set_paddle_api()
        input = np.random.random([2, 25]).astype("float32")
        shape = [2, 5, 5]
-        with fluid.dygraph.guard(paddle.CustomPlace('custom_cpu', 0)):
+        with fluid.dygraph.guard(paddle.CustomPlace("custom_cpu", 0)):
            x = self.to_tensor(input)
            positive_five = self.fill_constant([1], "int32", 5)
@@ -324,7 +315,7 @@ def executed_api(self):
        self.reshape = paddle.reshape

    def test_out(self):
-        paddle.disable_static(paddle.CustomPlace('custom_cpu', 0))
+        paddle.disable_static(paddle.CustomPlace("custom_cpu", 0))
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.reshape(x=input, shape=[5, 10])
@@ -333,7 +324,7 @@ def test_out(self):
        self.assertTrue(np.allclose(expected_out, out_np))

    def test_out_uint8(self):
-        paddle.disable_static(paddle.CustomPlace('custom_cpu', 0))
+        paddle.disable_static(paddle.CustomPlace("custom_cpu", 0))
        input_1 = np.random.random([5, 1, 10]).astype("uint8")
        input = paddle.to_tensor(input_1)
        output = self.reshape(x=input, shape=[5, 10])
@@ -342,7 +333,7 @@ def test_out_uint8(self):
        self.assertTrue(np.allclose(expected_out, out_np))

    def test_out_float32(self):
-        paddle.disable_static(paddle.CustomPlace('custom_cpu', 0))
+        paddle.disable_static(paddle.CustomPlace("custom_cpu", 0))
        input_1 = np.random.random([5, 1, 10]).astype("float32")
        input = paddle.to_tensor(input_1)
        output = self.reshape(x=input, shape=[5, 10])