|
1 |
| -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. |
| 1 | +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. |
2 | 2 | #
|
3 | 3 | # Licensed under the Apache License, Version 2.0 (the "License");
|
4 | 4 | # you may not use this file except in compliance with the License.
|
|
13 | 13 | # limitations under the License.
|
14 | 14 |
|
15 | 15 | import sys
|
| 16 | +import unittest |
| 17 | + |
| 18 | +import numpy as np |
16 | 19 |
|
17 | 20 | sys.path.append("..")
|
18 |
| -import unittest |
| 21 | + |
| 22 | +from op_test_xpu import XPUOpTest |
| 23 | +from xpu.get_test_cover_info import ( |
| 24 | + XPUOpTestWrapper, |
| 25 | + create_test_class, |
| 26 | + get_xpu_op_support_types, |
| 27 | +) |
19 | 28 |
|
20 | 29 | import paddle
|
21 | 30 |
|
22 |
| -''' |
23 |
| -class TestAssignOp(op_test.OpTest): |
24 |
| - def setUp(self): |
25 |
| - self.op_type = "assign" |
26 |
| - x = np.random.random(size=(100, 10)).astype('float32') |
27 |
| - self.inputs = {'X': x} |
28 |
| - self.outputs = {'Out': x} |
29 |
| -
|
30 |
| - def test_forward(self): |
31 |
| - if paddle.is_compiled_with_xpu(): |
32 |
| - place = paddle.XPUPlace(0) |
33 |
| - self.check_output_with_place(place) |
34 |
| -
|
35 |
| - def test_backward(self): |
36 |
| - if paddle.is_compiled_with_xpu(): |
37 |
| - place = paddle.XPUPlace(0) |
38 |
| - self.check_grad_with_place(place, ['X'], 'Out') |
39 |
| -
|
40 |
| -
|
41 |
| -class TestAssignOpWithLoDTensorArray(unittest.TestCase): |
42 |
| - def test_assign_LoDTensorArray(self): |
43 |
| - main_program = Program() |
44 |
| - startup_program = Program() |
45 |
| - with program_guard(main_program): |
46 |
| - x = fluid.data(name='x', shape=[100, 10], dtype='float32') |
47 |
| - x.stop_gradient = False |
48 |
| - y = fluid.layers.fill_constant( |
49 |
| - shape=[100, 10], dtype='float32', value=1) |
50 |
| - z = paddle.add(x=x, y=y) |
51 |
| - i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) |
52 |
| - init_array = paddle.tensor.array_write(x=z, i=i) |
53 |
| - array = fluid.layers.assign(init_array) |
54 |
| - sums = paddle.tensor.array_read(array=init_array, i=i) |
55 |
| - mean = paddle.mean(sums) |
56 |
| - append_backward(mean) |
57 |
| -
|
58 |
| - place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( |
59 |
| - ) else fluid.CPUPlace() |
60 |
| - exe = fluid.Executor(place) |
61 |
| - feed_x = np.random.random(size=(100, 10)).astype('float32') |
62 |
| - ones = np.ones((100, 10)).astype('float32') |
63 |
| - feed_add = feed_x + ones |
64 |
| - res = exe.run(main_program, |
65 |
| - feed={'x': feed_x}, |
66 |
| - fetch_list=[sums.name, x.grad_name]) |
67 |
| - np.testing.assert_allclose(res[0], feed_add) |
68 |
| - np.testing.assert_allclose(res[1], ones / 1000.0) |
69 |
| -
|
70 |
| -
|
71 |
| -class TestAssignOpError(unittest.TestCase): |
72 |
| - def test_errors(self): |
73 |
| - with program_guard(Program(), Program()): |
74 |
| - # The type of input must be Variable or numpy.ndarray. |
75 |
| - x1 = fluid.create_lod_tensor( |
76 |
| - np.array([[-1]]), [[1]], fluid.XPUPlace(0)) |
77 |
| - self.assertRaises(TypeError, fluid.layers.assign, x1) |
78 |
| - x2 = np.array([[2.5, 2.5]], dtype='uint8') |
79 |
| - self.assertRaises(TypeError, fluid.layers.assign, x2) |
80 |
| -''' |
81 |
| - |
82 |
| -if __name__ == '__main__': |
83 |
| - paddle.enable_static() |
# OpTest-style kernels below run under the static graph; switch modes once at
# module import so every generated TestCase executes in static mode.
paddle.enable_static()


class XPUTestAssignOP(XPUOpTestWrapper):
    """Test-class factory for the XPU ``assign`` operator.

    ``create_test_class`` consumes this wrapper to stamp out one concrete
    TestCase per supported dtype.
    """

    def __init__(self):
        self.op_name = 'assign'
        self.use_dynamic_create_class = False

    class TestAssignOPBase(XPUOpTest):
        """Base case: ``assign`` copies input X to output Out unchanged."""

        def setUp(self):
            self.place = paddle.XPUPlace(0)
            self.init_dtype()
            self.set_case()

        def init_dtype(self):
            # ``in_type`` is injected per-dtype by create_test_class().
            self.dtype = self.in_type

        def init_config(self):
            # Default input shape; shape-variant subclasses override this.
            self.input_shape = (2, 5)

        def set_case(self):
            self.op_type = 'assign'
            self.init_config()
            data = np.random.random(size=self.input_shape).astype(self.dtype)
            self.inputs = {'X': data}
            self.attrs = {}
            # assign is the identity op: expected output equals the input.
            self.outputs = {'Out': data}

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ['X'], 'Out')

    class XPUTestAssign1(TestAssignOPBase):
        def init_config(self):
            self.input_shape = [2, 768]

    class XPUTestAssign2(TestAssignOPBase):
        def init_config(self):
            self.input_shape = [3, 8, 4096]

    class XPUTestAssign3(TestAssignOPBase):
        def init_config(self):
            self.input_shape = [1024]

    class XPUTestAssign4(TestAssignOPBase):
        def init_config(self):
            self.input_shape = [2, 2, 255]
| 82 | + |
# Register one concrete TestCase per dtype that the XPU 'assign' kernel
# reports as supported on the current device/runtime.
support_types = get_xpu_op_support_types('assign')
for stype in support_types:
    create_test_class(globals(), XPUTestAssignOP, stype)

if __name__ == "__main__":
    unittest.main()
|
0 commit comments