@@ -398,6 +398,31 @@ static PyObject* tensor_method_numpy(TensorObject* self,
                            dense_tensor->place(),
                            dense_tensor->Holder()->ptr(),
                            dense_tensor->Holder()->size());
+    } else if (self->tensor.is_dist_tensor()) {
+#ifdef PADDLE_WITH_DISTRIBUTE
+      VLOG(6) << "Getting DistTensor's numpy value";
+      auto* dist_tensor =
+          static_cast<phi::distributed::DistTensor*>(self->tensor.impl().get());
+      auto dense_tensor = ReshardXToReplicated(dist_tensor);
+
+      cpu_tensor.set_meta(dense_tensor.meta());
+      auto tmp_allocation_ptr =
+          memory::Alloc(cpu_place, dense_tensor.Holder()->size());
+      cpu_tensor.ResetHolder(std::shared_ptr<phi::Allocation>(
+          tmp_allocation_ptr.release(), tmp_allocation_ptr.get_deleter()));
+      paddle::memory::Copy(place,
+                           cpu_tensor.Holder()->ptr(),
+                           dense_tensor.place(),
+                           dense_tensor.Holder()->ptr(),
+                           dense_tensor.Holder()->size());
+#else
+      PADDLE_THROW(
+          common::errors::Unavailable("The `numpy()` method of (Dist)Tensor "
+                                      "is not supported in the current "
+                                      "PaddlePaddle, please recompile and "
+                                      "install PaddlePaddle with the option "
+                                      "of `WITH_DISTRIBUTE=ON`."));
+#endif
     } else {
       VLOG(6) << "Getting DenseTensor's numpy value";
       auto dense_tensor =
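
For context: the new `is_dist_tensor()` branch reshards the `DistTensor` to a replicated `DenseTensor` via `ReshardXToReplicated`, then deep-copies its holder into `cpu_tensor` exactly as the dense path below does; on builds without `WITH_DISTRIBUTE=ON`, calling `.numpy()` on a dist tensor now raises a clear `Unavailable` error. Below is a minimal Python sketch of the behavior this branch exposes; the auto-parallel API calls (`dist.ProcessMesh`, `dist.shard_tensor`, `dist.Shard`) and the two-rank launch are assumptions for illustration, not part of this diff.

```python
# Minimal sketch (assumed setup, not part of this PR): a build with
# WITH_DISTRIBUTE=ON, launched on two ranks, e.g.
#   python -m paddle.distributed.launch --nproc_per_node=2 demo.py
import numpy as np
import paddle
import paddle.distributed as dist

mesh = dist.ProcessMesh([0, 1], dim_names=["x"])

x = paddle.arange(8, dtype="float32").reshape([4, 2])
# Shard the tensor along dim 0 across the two ranks of the mesh.
dist_x = dist.shard_tensor(x, mesh, [dist.Shard(0)])

# .numpy() now routes through the is_dist_tensor() branch above: the local
# shards are resharded to a replicated DenseTensor and copied to host memory,
# so each rank should get the full 4x2 array back as a NumPy array.
full = dist_x.numpy()
assert isinstance(full, np.ndarray)
print(full.shape)  # expected: (4, 2)
```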