diff --git a/paddle/phi/kernels/cpu/cholesky_kernel.cc b/paddle/phi/kernels/cpu/cholesky_kernel.cc
index aaa312995d2826..6580724975c42d 100644
--- a/paddle/phi/kernels/cpu/cholesky_kernel.cc
+++ b/paddle/phi/kernels/cpu/cholesky_kernel.cc
@@ -32,12 +32,12 @@ void CholeskyKernel(const Context& dev_ctx,
   using InputMatrixMap = Eigen::Map<const EigenMatrix>;
   using OutputMatrixMap = Eigen::Map<EigenMatrix>;
 
-  auto& dims = x.dims();
-  if (x.numel() == 0) {
-    out->Resize(dims);
+  if (out->numel() == 0) {
     dev_ctx.template Alloc<T>(out);
     return;
   }
+
+  auto& dims = x.dims();
   int batch_count = 1;
   for (int i = 0; i < dims.size() - 2; i++) {
     batch_count *= static_cast<int>(dims[i]);
diff --git a/paddle/phi/kernels/gpu/cholesky_kernel.cu b/paddle/phi/kernels/gpu/cholesky_kernel.cu
index 755adb96e21a44..8a831d2ee299a8 100644
--- a/paddle/phi/kernels/gpu/cholesky_kernel.cu
+++ b/paddle/phi/kernels/gpu/cholesky_kernel.cu
@@ -114,12 +114,12 @@ void CholeskyKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     bool upper,
                     DenseTensor* out) {
-  auto& dims = x.dims();
-  if (x.numel() == 0) {
-    out->Resize(dims);
+  if (out->numel() == 0) {
     dev_ctx.template Alloc<T>(out);
     return;
   }
+
+  auto& dims = x.dims();
   int batch_count = 1;
   for (int i = 0; i < dims.size() - 2; i++) {
     batch_count *= dims[i];
diff --git a/paddle/phi/kernels/impl/cholesky_grad_kernel_impl.h b/paddle/phi/kernels/impl/cholesky_grad_kernel_impl.h
index 8a2e1484302f0a..5d146dae8d53e2 100644
--- a/paddle/phi/kernels/impl/cholesky_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/cholesky_grad_kernel_impl.h
@@ -242,14 +242,13 @@ void CholeskyGradKernel(const Context& dev_ctx,
                         const DenseTensor& out_grad,
                         bool upper,
                         DenseTensor* x_grad) {
-  auto* x_grad_data = dev_ctx.template Alloc<T>(x_grad);
-
-  auto& dims = out.dims();
-  if (out.numel() == 0) {
-    x_grad->Resize(dims);
+  if (x_grad->numel() == 0) {
     dev_ctx.template Alloc<T>(x_grad);
     return;
   }
+
+  auto* x_grad_data = dev_ctx.template Alloc<T>(x_grad);
+  auto& dims = out.dims();
   int batch_count = 1;
   for (int i = 0; i < dims.size() - 2; i++) {
     batch_count *= dims[i];
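
All three hunks apply the same reordering: the size-zero check now keys off the tensor being written (`out->numel() == 0` in the forward kernels, `x_grad->numel() == 0` in the backward one), and it runs before `dims()` is read or the batch count is computed, so an empty result is simply allocated and returned. The sketch below illustrates that control flow in isolation; `Tensor`, `Context`, and `CholeskyLikeKernel` are hypothetical stand-ins for illustration, not Paddle's actual phi types or API.

```cpp
#include <cstdint>
#include <vector>

// Hypothetical stand-ins used only to show the early-return ordering;
// they are not phi::DenseTensor or the real device context.
struct Tensor {
  std::vector<int64_t> shape;
  int64_t numel() const {
    int64_t n = 1;
    for (int64_t d : shape) n *= d;
    return n;
  }
};

struct Context {
  // Placeholder for dev_ctx.template Alloc<T>(out) in the real kernels.
  void Alloc(Tensor* /*t*/) const {}
};

void CholeskyLikeKernel(const Context& dev_ctx, const Tensor& x, Tensor* out) {
  // Handle a 0-size output first: allocate it and return before any
  // shape-dependent work, mirroring the reordered kernels above.
  if (out->numel() == 0) {
    dev_ctx.Alloc(out);
    return;
  }

  // Only now inspect the input shape and derive the batch count.
  const auto& dims = x.shape;
  int batch_count = 1;
  for (size_t i = 0; i + 2 < dims.size(); ++i) {
    batch_count *= static_cast<int>(dims[i]);
  }
  (void)batch_count;  // The real kernels go on to factorize each batch.
}
```

Checking the tensor that is about to be written keeps the three kernels consistent and avoids computing `dims`-derived quantities for shapes that yield no elements.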