
[cherry-pick 2.4] Fix to_dlpack (#50138) #50250

Merged (2 commits) on Feb 7, 2023
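Summary (inferred from the diff): the pybind `_to_dlpack` binding now calls a new `framework::toDLPack` helper that heap-allocates a DLManagedTensor whose `manager_ctx` holds a `phi::DenseTensor` handle keeping the source buffer alive, and the Python capsule destructor is rewritten to free that allocation only while the capsule is still named "dltensor", i.e. only when no consumer has taken ownership. This addresses the `to_dlpack` failure exercised by the new `test_to_dlpack_for_loop` test (Paddle issue 50120).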
54 changes: 53 additions & 1 deletion paddle/fluid/framework/dlpack_tensor.cc
@@ -134,7 +134,59 @@ struct DLDeviceVisitor
};
} // namespace internal

- DLPackTensor::DLPackTensor(const Tensor &tensor, LaneType lanes) {
+ struct PaddleDLMTensor {
+   phi::DenseTensor handle;
+   DLManagedTensor tensor;
+ };
+
+ void deleter(DLManagedTensor *arg) {
+   delete[] arg->dl_tensor.shape;
+   delete[] arg->dl_tensor.strides;
+   delete static_cast<PaddleDLMTensor *>(arg->manager_ctx);
+ }
+
+ DLManagedTensor *toDLPack(const phi::DenseTensor &src) {
+   PaddleDLMTensor *pdDLMTensor(new PaddleDLMTensor);
+   pdDLMTensor->handle = const_cast<phi::DenseTensor &>(src);
+   pdDLMTensor->tensor.manager_ctx = pdDLMTensor;
+   pdDLMTensor->tensor.deleter = &deleter;
+   pdDLMTensor->tensor.dl_tensor.data = const_cast<void *>(src.data());
+
+   // init ndim
+   using DimType = decltype(pdDLMTensor->tensor.dl_tensor.ndim);  // int
+   pdDLMTensor->tensor.dl_tensor.ndim =
+       static_cast<DimType>(src.dims().size());
+   DimType ndim = pdDLMTensor->tensor.dl_tensor.ndim;
+
+   // init shape
+   auto shape = new int64_t[ndim];
+   for (DimType i = 0; i < ndim; ++i) {
+     shape[i] = src.dims()[i];
+   }
+   pdDLMTensor->tensor.dl_tensor.shape = shape;
+
+   // init stride
+   auto strides = new int64_t[ndim];
+   for (DimType i = 0; i < ndim; ++i) {
+     strides[i] = 1;
+   }
+   for (DimType i = ndim - 2; i >= 0; --i) {
+     strides[i] = shape[i + 1] * strides[i + 1];
+   }
+   pdDLMTensor->tensor.dl_tensor.strides = strides;
+
+   // init device, DLDevice type with device_type and device_id
+   auto place = src.place();
+   pdDLMTensor->tensor.dl_tensor.device =
+       paddle::platform::VisitPlace(place, internal::DLDeviceVisitor());
+
+   pdDLMTensor->tensor.dl_tensor.dtype = internal::GetDLDataTypeFromTypeIndex(
+       framework::TransToProtoVarType(src.dtype()));
+
+   pdDLMTensor->tensor.dl_tensor.byte_offset = 0;
+   return &(pdDLMTensor->tensor);
+ }
+
+ DLPackTensor::DLPackTensor(const phi::DenseTensor &tensor, LaneType lanes) {
  // init data, data buffer
  t_.data = const_cast<void *>(tensor.data());

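For reference, the stride initialization in toDLPack above fills dl_tensor.strides for a compact row-major tensor: the innermost stride is 1 and each outer stride is the product of all inner extents. A minimal Python sketch of that rule (illustration only, not part of this PR):

    # Row-major (compact) strides, mirroring the two loops in toDLPack.
    def contiguous_strides(shape):
        strides = [1] * len(shape)  # innermost stride; also covers 1-d tensors
        for i in range(len(shape) - 2, -1, -1):
            strides[i] = shape[i + 1] * strides[i + 1]
        return strides

    assert contiguous_strides([3, 4, 5]) == [20, 5, 1]
    assert contiguous_strides([7]) == [1]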
4 changes: 3 additions & 1 deletion paddle/fluid/framework/dlpack_tensor.h
@@ -28,7 +28,7 @@ class DLPackTensor {
std::remove_reference<decltype(::DLTensor::shape[0])>::type; // int64_t

// lanes is only used in CPU to enable vectorization
-   explicit DLPackTensor(const Tensor& tensor, LaneType lanes = 1);
+   explicit DLPackTensor(const phi::DenseTensor& tensor, LaneType lanes = 1);

inline operator const ::DLTensor&() const { return t_; }

@@ -44,5 +44,7 @@
ShapeType shape_[DDim::kMaxRank];
};

+ DLManagedTensor* toDLPack(const phi::DenseTensor& src);

} // namespace framework
} // namespace paddle
23 changes: 8 additions & 15 deletions paddle/fluid/pybind/tensor.cc
@@ -472,23 +472,16 @@ void BindTensor(pybind11::module &m) {  // NOLINT
print(t.shape()) # [5, 30]
)DOC")
.def("_to_dlpack",
[](framework::Tensor &self) {
DLPackTensor dlpack_tensor(self, 1);
DLManagedTensor *dmt = dlpack_tensor.ToDLManagedTensor();
auto capsule = py::capsule(
[](phi::DenseTensor &self) {
DLManagedTensor *dmt = framework::toDLPack(self);
auto capsule = pybind11::capsule(
static_cast<void *>(dmt), "dltensor", [](PyObject *ptr) {
if (ptr) {
auto dltensor = new DLManagedTensor;
try {
dltensor = reinterpret_cast<DLManagedTensor *>(
PyCapsule_GetPointer(ptr, "used_dltensor"));
return;
} catch (...) {
dltensor = reinterpret_cast<DLManagedTensor *>(
PyCapsule_GetPointer(ptr, "dltensor"));
}
dltensor->deleter(dltensor);
if (!PyCapsule_IsValid(ptr, "dltensor")) {
return;
}
DLManagedTensor *dmt = static_cast<DLManagedTensor *>(
PyCapsule_GetPointer(ptr, "dltensor"));
dmt->deleter(dmt);
});
return capsule;
})
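The rewritten capsule destructor follows the DLPack exchange convention: the producer names the capsule "dltensor", and a consumer that takes ownership renames it to "used_dltensor" (the name the old code probed for). Freeing must therefore happen only while the capsule is still named "dltensor". PyCapsule_IsValid expresses exactly that check; the old try/catch could never work as intended, since PyCapsule_GetPointer is a C API that does not throw C++ exceptions but instead sets a Python error and returns NULL. A sketch of both lifecycles from Python, using only APIs exercised by the tests below:

    import paddle

    x = paddle.rand([3, 5])
    cap = paddle.utils.dlpack.to_dlpack(x)    # capsule named "dltensor"
    y = paddle.utils.dlpack.from_dlpack(cap)  # consumer renames it "used_dltensor"
    # When `cap` is later destroyed, PyCapsule_IsValid(ptr, "dltensor") is
    # false, so the destructor returns early; `y` now owns the data.

    leftover = paddle.utils.dlpack.to_dlpack(paddle.rand([2, 2]))
    del leftover  # never consumed: the destructor frees the DLManagedTensor here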
42 changes: 28 additions & 14 deletions python/paddle/tests/test_dlpack.py
@@ -22,19 +22,20 @@


class TestDLPack(unittest.TestCase):
-
    def func_test_dlpack_dygraph(self):
        paddle.disable_static()
        tensor = paddle.to_tensor(np.array([1, 2, 3, 4]).astype('int'))
        dlpack = paddle.utils.dlpack.to_dlpack(tensor)
        out_from_dlpack = paddle.utils.dlpack.from_dlpack(dlpack)
        if paddle.fluid.framework.in_dygraph_mode():
            self.assertTrue(
-                isinstance(out_from_dlpack, paddle.fluid.core.eager.Tensor))
+                isinstance(out_from_dlpack, paddle.fluid.core.eager.Tensor)
+            )
        else:
            self.assertTrue(isinstance(out_from_dlpack, paddle.Tensor))
-        np.testing.assert_array_equal(np.array(out_from_dlpack),
-                                      np.array([1, 2, 3, 4]).astype('int'))
+        np.testing.assert_array_equal(
+            np.array(out_from_dlpack), np.array([1, 2, 3, 4]).astype('int')
+        )

    def test_dlpack_dygraph(self):
        with _test_eager_guard():
@@ -58,26 +58,32 @@ def test_dlpack_tensor_larger_than_2dim(self):
    def test_dlpack_static(self):
        paddle.enable_static()
        tensor = fluid.create_lod_tensor(
-            np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]],
-            fluid.CPUPlace())
+            np.array([[1], [2], [3], [4]]).astype('int'),
+            [[1, 3]],
+            fluid.CPUPlace(),
+        )
        dlpack = paddle.utils.dlpack.to_dlpack(tensor)
        out_from_dlpack = paddle.utils.dlpack.from_dlpack(dlpack)
        self.assertTrue(isinstance(out_from_dlpack, fluid.core.Tensor))
        np.testing.assert_array_equal(
            np.array(out_from_dlpack),
-            np.array([[1], [2], [3], [4]]).astype('int'))
+            np.array([[1], [2], [3], [4]]).astype('int'),
+        )

        # when build with cuda
        if core.is_compiled_with_cuda():
            gtensor = fluid.create_lod_tensor(
-                np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]],
-                fluid.CUDAPlace(0))
+                np.array([[1], [2], [3], [4]]).astype('int'),
+                [[1, 3]],
+                fluid.CUDAPlace(0),
+            )
            gdlpack = paddle.utils.dlpack.to_dlpack(gtensor)
            gout_from_dlpack = paddle.utils.dlpack.from_dlpack(gdlpack)
            self.assertTrue(isinstance(gout_from_dlpack, fluid.core.Tensor))
            np.testing.assert_array_equal(
                np.array(gout_from_dlpack),
-                np.array([[1], [2], [3], [4]]).astype('int'))
+                np.array([[1], [2], [3], [4]]).astype('int'),
+            )

    def func_test_dlpack_dtype_conversion(self):
        paddle.disable_static()
@@ -104,7 +111,8 @@ def func_test_dlpack_dtype_conversion(self):
        for dtype in complex_dtypes:
            x = paddle.to_tensor(
                [[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]],
-                dtype=dtype)
+                dtype=dtype,
+            )
            dlpack = paddle.utils.dlpack.to_dlpack(x)
            o = paddle.utils.dlpack.from_dlpack(dlpack)
            self.assertEqual(x.dtype, o.dtype)
@@ -115,12 +123,18 @@ def test_dlpack_dtype_conversion(self):
            self.func_test_dlpack_dtype_conversion()
        self.func_test_dlpack_dtype_conversion()

+    def test_to_dlpack_for_loop(self):
+        # See Paddle issue 50120
+        for i in range(10):
+            x = paddle.rand([3, 5])
+            dlpack = paddle.utils.dlpack.to_dlpack(x)


class TestRaiseError(unittest.TestCase):
    def func_test_from_dlpack_raise_type_error(self):
-        self.assertRaises(TypeError, paddle.utils.dlpack.from_dlpack,
-                          np.zeros(5))
+        self.assertRaises(
+            TypeError, paddle.utils.dlpack.from_dlpack, np.zeros(5)
+        )

    def test_from_dlpack_raise_type_error(self):
        with _test_eager_guard():