
Commit 8fa157a

Fix
1 parent fefced8 commit 8fa157a

3 files changed: 0 additions and 237 deletions

paddle/fluid/pybind/bind_fleet_executor.cc

Lines changed: 0 additions & 84 deletions

@@ -214,90 +214,6 @@ void BindFleetExecutor(py::module* m) {
         self.Run(inputs, &outputs);
         return outputs;
       });
-
-  py::class_<DistModelDataBuf>(*m, "DistModelDataBuf")
-      .def(py::init<size_t>())
-      .def(py::init([](std::vector<float>& data) {
-        auto buf = DistModelDataBuf(data.size() * sizeof(float));
-        std::memcpy(buf.data(), static_cast<void*>(data.data()), buf.length());
-        return buf;
-      }))
-      .def(py::init(&DistModelDataBufCreate<int32_t>))
-      .def(py::init(&DistModelDataBufCreate<int64_t>))
-      .def(py::init(&DistModelDataBufCreate<float>))
-      .def(py::init(&DistModelDataBufCreate<phi::dtype::float16>))
-      .def("reset",
-           [](DistModelDataBuf& self, std::vector<float>& data) {
-             self.Resize(data.size() * sizeof(float));
-             std::memcpy(self.data(), data.data(), self.length());
-           })
-      .def("reset", &DistModelDataBufReset<int32_t>)
-      .def("reset", &DistModelDataBufReset<int64_t>)
-      .def("reset", &DistModelDataBufReset<float>)
-      .def("reset", &DistModelDataBufReset<phi::dtype::float16>)
-      .def("length", &DistModelDataBuf::length)
-      .def(
-          "tolist",
-          [](DistModelDataBuf& self, const std::string& dtype) -> py::list {
-            py::list l;
-            if (dtype == "int32") {
-              auto* data = static_cast<int32_t*>(self.data());
-              auto size = self.length() / sizeof(int32_t);
-              l = py::cast(std::vector<int32_t>(data, data + size));
-            } else if (dtype == "int64") {
-              auto* data = static_cast<int64_t*>(self.data());
-              auto size = self.length() / sizeof(int64_t);
-              l = py::cast(std::vector<int64_t>(data, data + size));
-            } else if (dtype == "float32") {
-              auto* data = static_cast<float*>(self.data());
-              auto size = self.length() / sizeof(float);
-              l = py::cast(std::vector<float>(data, data + size));
-            } else if (dtype == "float16") {
-              auto* data = static_cast<phi::dtype::float16*>(self.data());
-              auto size = self.length() / sizeof(phi::dtype::float16);
-              l = py::cast(std::vector<phi::dtype::float16>(data, data + size));
-            } else {
-              PADDLE_THROW(common::errors::Unimplemented(
-                  "Unsupported data type. Now only supports INT32, INT64, "
-                  "FLOAT16 and FLOAT32."));
-            }
-            return l;
-          });
-
-  py::class_<DistModelTensor>(*m, "DistModelTensor")
-      .def(py::init<>())
-      .def(py::init(&DistModelTensorCreate<int32_t>),
-           py::arg("data"),
-           py::arg("name") = "",
-           py::arg("lod") = std::vector<std::vector<size_t>>(),
-           py::arg("copy") = true)
-      .def(py::init(&DistModelTensorCreate<int64_t>),
-           py::arg("data"),
-           py::arg("name") = "",
-           py::arg("lod") = std::vector<std::vector<size_t>>(),
-           py::arg("copy") = true)
-      .def(py::init(&DistModelTensorCreate<float>),
-           py::arg("data"),
-           py::arg("name") = "",
-           py::arg("lod") = std::vector<std::vector<size_t>>(),
-           py::arg("copy") = true)
-      .def(py::init(&DistModelTensorCreate<phi::dtype::float16>),
-           py::arg("data"),
-           py::arg("name") = "",
-           py::arg("lod") = std::vector<std::vector<size_t>>(),
-           py::arg("copy") = true)
-      .def_readwrite("name", &DistModelTensor::name)
-      .def_readwrite("shape", &DistModelTensor::shape)
-      .def_readwrite("data", &DistModelTensor::data)
-      .def_readwrite("dtype", &DistModelTensor::dtype)
-      .def_readwrite("lod", &DistModelTensor::lod)
-      .def("as_ndarray", &DistModelTensorGetData);
-
-  py::enum_<DistModelDataType>(*m, "DistModelDataType")
-      .value("FLOAT32", DistModelDataType::FLOAT32)
-      .value("INT64", DistModelDataType::INT64)
-      .value("INT32", DistModelDataType::INT32)
-      .value("FLOAT16", DistModelDataType::FLOAT16);
 }
 }  // namespace pybind
 }  // namespace paddle
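
For context, the bindings deleted above exposed DistModelDataBuf, DistModelTensor, and DistModelDataType to Python through paddle.base.core, which is exactly what the deleted tests below exercised. A minimal sketch of the buffer semantics those bindings implemented, assuming a build from before this commit (the names no longer exist afterwards):

import numpy as np
from paddle.base.core import DistModelDataType, DistModelTensor  # pre-commit build only

arr = np.arange(40, dtype=np.int32).reshape(20, 2)
t = DistModelTensor(arr, 'demo')                       # data lands in a DistModelDataBuf
assert t.dtype == DistModelDataType.INT32
assert t.data.length() == arr.size * arr.itemsize      # length() is in bytes: 40 * 4
assert t.data.tolist('int32') == arr.ravel().tolist()  # tolist() reinterprets the raw bytes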

test/legacy_test/test_fleet_exe_dist_model_tensor.py

Lines changed: 0 additions & 72 deletions

@@ -14,77 +14,5 @@
 
 import unittest
 
-import numpy as np
-
-import paddle
-from paddle.base.core import DistModelDataType, DistModelTensor
-
-paddle.enable_static()
-
-
-class TestDistModelTensor(unittest.TestCase):
-    def test_dist_model_tensor(self):
-        tensor_32 = np.random.randint(10, 20, size=[20, 2]).astype('int32')
-        dist_tensor32 = DistModelTensor(tensor_32, '32_tensor')
-        self.assertEqual(dist_tensor32.dtype, DistModelDataType.INT32)
-        self.assertEqual(
-            dist_tensor32.data.tolist('int32'), tensor_32.ravel().tolist()
-        )
-        # the length is how many byte the data contains
-        self.assertEqual(dist_tensor32.data.length(), 40 * 4)
-        self.assertEqual(dist_tensor32.name, '32_tensor')
-        dist_tensor32.data.reset(tensor_32)
-        self.assertEqual(
-            dist_tensor32.as_ndarray().ravel().tolist(),
-            tensor_32.ravel().tolist(),
-        )
-
-        tensor_64 = np.random.randint(10, 20, size=[20, 2]).astype('int64')
-        dist_tensor64 = DistModelTensor(tensor_64, '64_tensor')
-        self.assertEqual(dist_tensor64.dtype, DistModelDataType.INT64)
-        self.assertEqual(
-            dist_tensor64.data.tolist('int64'), tensor_64.ravel().tolist()
-        )
-        self.assertEqual(dist_tensor64.data.length(), 40 * 8)
-        self.assertEqual(dist_tensor64.name, '64_tensor')
-        dist_tensor64.data.reset(tensor_64)
-        self.assertEqual(
-            dist_tensor64.as_ndarray().ravel().tolist(),
-            tensor_64.ravel().tolist(),
-        )
-
-        tensor_float = np.random.randn(20, 2).astype('float32')
-        dist_tensor_float = DistModelTensor(tensor_float, 'float_tensor')
-        self.assertEqual(dist_tensor_float.dtype, DistModelDataType.FLOAT32)
-        self.assertEqual(
-            dist_tensor_float.data.tolist('float32'),
-            tensor_float.ravel().tolist(),
-        )
-        self.assertEqual(dist_tensor_float.data.length(), 40 * 4)
-        self.assertEqual(dist_tensor_float.name, 'float_tensor')
-        dist_tensor_float.data.reset(tensor_float)
-        self.assertEqual(
-            dist_tensor_float.as_ndarray().ravel().tolist(),
-            tensor_float.ravel().tolist(),
-        )
-
-        tensor_float_16 = np.random.randn(20, 2).astype('float16')
-        dist_tensor_float_16 = DistModelTensor(
-            tensor_float_16, 'float_tensor_16'
-        )
-        self.assertEqual(dist_tensor_float_16.dtype, DistModelDataType.FLOAT16)
-        self.assertEqual(
-            dist_tensor_float_16.data.tolist('float16'),
-            tensor_float_16.ravel().tolist(),
-        )
-        self.assertEqual(dist_tensor_float_16.data.length(), 40 * 2)
-        self.assertEqual(dist_tensor_float_16.name, 'float_tensor_16')
-        dist_tensor_float_16.data.reset(tensor_float_16)
-        self.assertEqual(
-            dist_tensor_float_16.as_ndarray().ravel().tolist(),
-            tensor_float_16.ravel().tolist(),
-        )
-
-
 if __name__ == '__main__':
     unittest.main()
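
The length() assertions in the deleted test follow directly from element count times element size: a [20, 2] array holds 40 elements, giving 40 * 4 bytes for int32 and float32, 40 * 8 for int64, and 40 * 2 for float16. A standalone check of that arithmetic with plain NumPy (no Paddle API involved):

import numpy as np

for dtype, expected in [('int32', 40 * 4), ('int64', 40 * 8),
                        ('float32', 40 * 4), ('float16', 40 * 2)]:
    arr = np.zeros([20, 2], dtype=dtype)
    # arr.size * arr.itemsize is the byte count the deleted test compared against data.length()
    assert arr.size * arr.itemsize == expected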

test/xpu/test_fleet_exe_dist_model_run_xpu.py

Lines changed: 0 additions & 81 deletions

@@ -12,88 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
-import tempfile
 import unittest
 
-import numpy as np
-
-import paddle
-from paddle.base import core
-
-paddle.enable_static()
-
-
-class TestDistModelRun(unittest.TestCase):
-    def setUp(self):
-        self.temp_dir = tempfile.TemporaryDirectory()
-
-    def tearDown(self):
-        # step 6: clean up the env, delete the saved model and params
-        print('cleaned up the env')
-        self.temp_dir.cleanup()
-
-    def test_dist_model_run(self):
-        with paddle.pir_utils.OldIrGuard():
-            # step 0: declare folder to save the model and params
-            path_prefix = os.path.join(
-                self.temp_dir.name, "dist_model_run_test/inf"
-            )
-
-            # step 1: saving the inference model and params
-            x = paddle.static.data(name='x', shape=[28, 28], dtype='float32')
-            y = paddle.static.data(name='y', shape=[28, 1], dtype='int64')
-            predict = paddle.static.nn.fc(x, 10, activation='softmax')
-            loss = paddle.nn.functional.cross_entropy(predict, y)
-            avg_loss = paddle.tensor.stat.mean(loss)
-            exe = paddle.static.Executor(paddle.XPUPlace(0))
-            exe.run(paddle.static.default_startup_program())
-            x_data = np.random.randn(28, 28).astype('float32')
-            y_data = np.random.randint(0, 9, size=[28, 1]).astype('int64')
-            exe.run(
-                paddle.static.default_main_program(),
-                feed={'x': x_data, 'y': y_data},
-                fetch_list=[avg_loss],
-            )
-            paddle.static.save_inference_model(
-                path_prefix, [x, y], [avg_loss], exe
-            )
-            print('save model to', path_prefix)
-
-            # step 2: prepare fake data for the inference
-            x_tensor = np.random.randn(28, 28).astype('float32')
-            y_tensor = np.random.randint(0, 9, size=[28, 1]).astype('int64')
-
-            # step 3: init the dist model to inference with fake data
-            config = core.DistModelConfig()
-            config.model_dir = path_prefix
-            config.place = 'XPU'
-            dist = core.DistModel(config)
-            dist.init()
-            dist_x = core.DistModelTensor(x_tensor, 'x')
-            dist_y = core.DistModelTensor(y_tensor, 'y')
-            input_data = [dist_x, dist_y]
-            output_rst = dist.run(input_data)
-            dist_model_rst = output_rst[0].as_ndarray().ravel().tolist()
-            print("dist model rst:", dist_model_rst)
-
-            # step 4: use framework's api to inference with fake data
-            [
-                inference_program,
-                feed_target_names,
-                fetch_targets,
-            ] = paddle.static.load_inference_model(path_prefix, exe)
-            results = exe.run(
-                inference_program,
-                feed={'x': x_tensor, 'y': y_tensor},
-                fetch_list=fetch_targets,
-            )
-            load_inference_model_rst = results[0]
-            print("load inference model api rst:", load_inference_model_rst)
-
-            # step 5: compare two results
-            np.testing.assert_allclose(dist_model_rst, load_inference_model_rst)
-
-
 if __name__ == '__main__':
     unittest.main()
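
A hypothetical post-commit sanity check (not part of this change, and assuming the three classes were only exposed through paddle.base.core) that the removed bindings are gone after rebuilding; DistModelConfig and DistModel themselves are not touched by this diff:

from paddle.base import core

# Expected to print False for each name once the commit above is built.
for name in ('DistModelDataBuf', 'DistModelTensor', 'DistModelDataType'):
    print(name, 'exposed:', hasattr(core, name))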
