Commit 613a3ff

[XPU] add fp16 support for assign. update xccl to 1.0.9. (#50702)

1 parent d884573 commit 613a3ff
4 files changed: +72 -66 lines

cmake/external/xpu.cmake (+1 -1)

@@ -8,7 +8,7 @@ set(XPU_API_LIB_NAME "libxpuapi.so")
 set(XPU_RT_LIB_NAME "libxpurt.so")
 
 set(XPU_BASE_DATE "20230220")
-set(XPU_XCCL_BASE_VERSION "1.0.8")
+set(XPU_XCCL_BASE_VERSION "1.0.9")
 
 if(NOT DEFINED XPU_BASE_URL)
   set(XPU_BASE_URL_WITHOUT_DATE

paddle/phi/backends/xpu/xpu2_op_list.cc (+1 -0)

@@ -46,6 +46,7 @@ XPUOpMap& get_kl2_ops() {
      XPUKernelSet({phi::DataType::FLOAT32,
                    phi::DataType::FLOAT64,
                    phi::DataType::INT32,
+                   phi::DataType::FLOAT16,
                    phi::DataType::INT64,
                    phi::DataType::BOOL})},
     {"assign_value", XPUKernelSet({phi::DataType::FLOAT32})},

paddle/phi/kernels/assign_kernel.cc (+2 -1)

@@ -179,5 +179,6 @@ PD_REGISTER_KERNEL(assign_value,
                    bool,
                    int,
                    float,
-                   int64_t) {}
+                   int64_t,
+                   phi::dtype::float16) {}
 #endif
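The extra registration lets assign_value accept float16 on XPU; this sketch assumes assign_value is the kernel that paddle.assign reaches when handed a raw numpy value. A quick dynamic-graph smoke test, assuming a Paddle build with XPU support and at least one XPU device (otherwise set_device raises):

import numpy as np
import paddle

paddle.set_device('xpu:0')  # assumes an XPU-enabled build
x = np.ones([2, 3], dtype='float16')
y = paddle.assign(x)  # numpy input is assumed to route through assign_value
print(y.dtype)  # expected: paddle.float16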
python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py (+68 -64)

@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,72 +13,76 @@
 # limitations under the License.
 
 import sys
+import unittest
+
+import numpy as np
 
 sys.path.append("..")
-import unittest
+
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import (
+    XPUOpTestWrapper,
+    create_test_class,
+    get_xpu_op_support_types,
+)
 
 import paddle
 
-'''
-class TestAssignOp(op_test.OpTest):
-    def setUp(self):
-        self.op_type = "assign"
-        x = np.random.random(size=(100, 10)).astype('float32')
-        self.inputs = {'X': x}
-        self.outputs = {'Out': x}
-
-    def test_forward(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_backward(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
-
-
-class TestAssignOpWithLoDTensorArray(unittest.TestCase):
-    def test_assign_LoDTensorArray(self):
-        main_program = Program()
-        startup_program = Program()
-        with program_guard(main_program):
-            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
-            x.stop_gradient = False
-            y = fluid.layers.fill_constant(
-                shape=[100, 10], dtype='float32', value=1)
-            z = paddle.add(x=x, y=y)
-            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
-            init_array = paddle.tensor.array_write(x=z, i=i)
-            array = fluid.layers.assign(init_array)
-            sums = paddle.tensor.array_read(array=init_array, i=i)
-            mean = paddle.mean(sums)
-            append_backward(mean)
-
-        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
-        exe = fluid.Executor(place)
-        feed_x = np.random.random(size=(100, 10)).astype('float32')
-        ones = np.ones((100, 10)).astype('float32')
-        feed_add = feed_x + ones
-        res = exe.run(main_program,
-                      feed={'x': feed_x},
-                      fetch_list=[sums.name, x.grad_name])
-        np.testing.assert_allclose(res[0], feed_add)
-        np.testing.assert_allclose(res[1], ones / 1000.0)
-
-
-class TestAssignOpError(unittest.TestCase):
-    def test_errors(self):
-        with program_guard(Program(), Program()):
-            # The type of input must be Variable or numpy.ndarray.
-            x1 = fluid.create_lod_tensor(
-                np.array([[-1]]), [[1]], fluid.XPUPlace(0))
-            self.assertRaises(TypeError, fluid.layers.assign, x1)
-            x2 = np.array([[2.5, 2.5]], dtype='uint8')
-            self.assertRaises(TypeError, fluid.layers.assign, x2)
-'''
-
-if __name__ == '__main__':
-    paddle.enable_static()
+paddle.enable_static()
+
+
+class XPUTestAssignOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'assign'
+        self.use_dynamic_create_class = False
+
+    class TestAssignOPBase(XPUOpTest):
+        def setUp(self):
+            self.place = paddle.XPUPlace(0)
+            self.init_dtype()
+            self.set_case()
+
+        def set_case(self):
+            self.op_type = 'assign'
+            self.init_config()
+
+            x = np.random.random(size=self.input_shape).astype(self.dtype)
+            self.inputs = {'X': x}
+            self.attrs = {}
+            self.outputs = {'Out': x}
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+        def test_check_grad(self):
+            self.check_grad_with_place(self.place, ['X'], 'Out')
+
+        def init_config(self):
+            self.input_shape = (2, 5)
+
+    class XPUTestAssign1(TestAssignOPBase):
+        def init_config(self):
+            self.input_shape = [2, 768]
+
+    class XPUTestAssign2(TestAssignOPBase):
+        def init_config(self):
+            self.input_shape = [3, 8, 4096]
+
+    class XPUTestAssign3(TestAssignOPBase):
+        def init_config(self):
+            self.input_shape = [1024]
+
+    class XPUTestAssign4(TestAssignOPBase):
+        def init_config(self):
+            self.input_shape = [2, 2, 255]
+
+
+support_types = get_xpu_op_support_types('assign')
+for stype in support_types:
+    create_test_class(globals(), XPUTestAssignOP, stype)
+
+if __name__ == "__main__":
     unittest.main()
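The closing loop is what fans the wrapper out into per-dtype unit tests: get_xpu_op_support_types('assign') reads the support list (which now includes fp16 via the op-list change above) and create_test_class stamps one concrete test class per dtype into the module. Each generated case exercises assign as an identity op. A standalone dynamic-graph equivalent of what the new fp16 case checks, assuming an XPU device is available:

import numpy as np
import paddle

paddle.set_device('xpu:0')  # assumes an XPU-enabled build
x_np = np.random.random(size=(2, 5)).astype('float16')
x = paddle.to_tensor(x_np, stop_gradient=False)
out = paddle.assign(x)

np.testing.assert_array_equal(out.numpy(), x_np)  # forward: exact copy
out.sum().backward()
np.testing.assert_array_equal(
    x.grad.numpy(), np.ones_like(x_np)  # backward: gradient of a copy is ones
)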
