Commit 717aeca

datatype for experimental to phi (#456)
1 parent 8a8c8d9 commit 717aeca

24 files changed: 80 additions, 80 deletions
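
Every hunk in this commit applies the same mechanical rename: the legacy paddle::experimental::DataType spelling (and the nested phi::DenseTensorMeta::DataType alias) becomes phi::DataType, and the paddle::experimental::SizeOf / CppTypeToDataType helpers move to their phi:: counterparts. A minimal before/after sketch of the pattern, assuming a phi::DenseTensor named t is in scope (t is illustrative, not taken from this commit):

    // Before: legacy experimental aliases.
    auto acl_dtype = ConvertToNpuDtype(paddle::experimental::DataType::INT32);
    auto nbytes = t.numel() * paddle::experimental::SizeOf(t.dtype());

    // After: canonical phi spellings, as used throughout this commit.
    auto acl_dtype = ConvertToNpuDtype(phi::DataType::INT32);
    auto nbytes = t.numel() * phi::SizeOf(t.dtype());

The two spellings name the same underlying enum, so the rename should be behavior-preserving.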

backends/npu/kernels/accuracy_kernel.cc

Lines changed: 3 additions & 3 deletions

@@ -38,8 +38,8 @@ void AccuracyRawKernel(const Context& dev_ctx,
   cast_indices.set_meta(meta);
   cast_label.set_meta(meta);
   if (indices.dtype() != label.dtype()) {
-    auto dst_dtype = ConvertToNpuDtype(paddle::experimental::DataType::INT32);
-    if (indices.dtype() != paddle::experimental::DataType::INT32) {
+    auto dst_dtype = ConvertToNpuDtype(phi::DataType::INT32);
+    if (indices.dtype() != phi::DataType::INT32) {
       cast_indices.Resize(indices.dims());
       dev_ctx.template Alloc<int>(&cast_indices);
       const auto& runner_cast_indices =
@@ -51,7 +51,7 @@ void AccuracyRawKernel(const Context& dev_ctx,
     } else {
       cast_indices = indices;
     }
-    if (label.dtype() != paddle::experimental::DataType::INT32) {
+    if (label.dtype() != phi::DataType::INT32) {
       cast_label.Resize(label.dims());
       dev_ctx.template Alloc<int>(&cast_label);
       const auto& runner_cast_label =

backends/npu/kernels/amp/update_loss_scaling_kernel.cc

Lines changed: 1 addition & 1 deletion

@@ -216,7 +216,7 @@ class LazyZerosNPU {
     if (!found_inf_vec[0]) {
       TensorCopy(dev_ctx, *x, false, out);
     } else if (zero_ptr != dst_ptr) {
-      auto size = out->numel() * paddle::experimental::SizeOf(out->dtype());
+      auto size = out->numel() * phi::SizeOf(out->dtype());
       aclrtMemcpyAsync(
           dst_ptr, size, zero_ptr, size, ACL_MEMCPY_DEVICE_TO_DEVICE, stream);
     }

backends/npu/kernels/assign_kernel.cc

Lines changed: 1 addition & 1 deletion

@@ -91,7 +91,7 @@ void AssignValueKernel(const Context& dev_ctx,
                        phi::DataType dtype,
                        const std::vector<phi::Scalar>& values,
                        phi::DenseTensor* out) {
-  auto template_dtype = paddle::experimental::CppTypeToDataType<T>::Type();
+  auto template_dtype = phi::CppTypeToDataType<T>::Type();
   PADDLE_ENFORCE_EQ(
       dtype,
       template_dtype,

backends/npu/kernels/cast_kernel.cc

Lines changed: 12 additions & 12 deletions

@@ -21,7 +21,7 @@ namespace custom_kernel {
 template <typename T, typename Context>
 void CastKernel(const Context& dev_ctx,
                 const phi::DenseTensor& x,
-                phi::DenseTensorMeta::DataType dtype,
+                phi::DataType dtype,
                 phi::DenseTensor* out) {
   if (x.dtype() == dtype) {
     dev_ctx.template Alloc<T>(out);
@@ -31,27 +31,27 @@ void CastKernel(const Context& dev_ctx,

   int aclDtype = ConvertToNpuDtype(dtype);

-  if (dtype == phi::DenseTensorMeta::DataType::FLOAT32) {
+  if (dtype == phi::DataType::FLOAT32) {
     dev_ctx.template Alloc<float>(out);
-  } else if (dtype == phi::DenseTensorMeta::DataType::FLOAT64) {
+  } else if (dtype == phi::DataType::FLOAT64) {
     dev_ctx.template Alloc<double>(out);
-  } else if (dtype == phi::DenseTensorMeta::DataType::FLOAT16) {
+  } else if (dtype == phi::DataType::FLOAT16) {
     dev_ctx.template Alloc<phi::dtype::float16>(out);
-  } else if (dtype == phi::DenseTensorMeta::DataType::INT16) {
+  } else if (dtype == phi::DataType::INT16) {
     dev_ctx.template Alloc<int16_t>(out);
-  } else if (dtype == phi::DenseTensorMeta::DataType::INT32) {
+  } else if (dtype == phi::DataType::INT32) {
     dev_ctx.template Alloc<int32_t>(out);
-  } else if (dtype == phi::DenseTensorMeta::DataType::INT64) {
+  } else if (dtype == phi::DataType::INT64) {
     dev_ctx.template Alloc<int64_t>(out);
-  } else if (dtype == phi::DenseTensorMeta::DataType::BOOL) {
+  } else if (dtype == phi::DataType::BOOL) {
     dev_ctx.template Alloc<bool>(out);
-  } else if (dtype == phi::DenseTensorMeta::DataType::UINT8) {
+  } else if (dtype == phi::DataType::UINT8) {
     dev_ctx.template Alloc<uint8_t>(out);
-  } else if (dtype == phi::DenseTensorMeta::DataType::INT8) {
+  } else if (dtype == phi::DataType::INT8) {
     dev_ctx.template Alloc<int8_t>(out);
-  } else if (dtype == phi::DenseTensorMeta::DataType::COMPLEX64) {
+  } else if (dtype == phi::DataType::COMPLEX64) {
     dev_ctx.template Alloc<phi::dtype::complex<float>>(out);
-  } else if (dtype == phi::DenseTensorMeta::DataType::COMPLEX128) {
+  } else if (dtype == phi::DataType::COMPLEX128) {
     dev_ctx.template Alloc<phi::dtype::complex<double>>(out);
   } else {
     phi::errors::InvalidArgument("Unsupported cast dtype %s", dtype);

backends/npu/kernels/coalesce_tensor_kernel.cc

Lines changed: 1 addition & 1 deletion

@@ -212,7 +212,7 @@ void CoalesceTensorKernel(const Context &dev_ctx,
   size_t numel = 0;

   if (size_of_dtype == -1) {
-    size_of_dtype = paddle::experimental::SizeOf(dtype);
+    size_of_dtype = phi::SizeOf(dtype);
   }
   GetMemSizeAndDtype(
       input, &numel, size_of_dtype, dev_ctx.GetPlace(), use_align, align_size);

backends/npu/kernels/compare_kernel.cc

Lines changed: 1 addition & 1 deletion

@@ -19,7 +19,7 @@ namespace custom_kernel {
 template <typename T, typename Context>
 void CastKernel(const Context& dev_ctx,
                 const phi::DenseTensor& x,
-                phi::DenseTensorMeta::DataType dtype,
+                phi::DataType dtype,
                 phi::DenseTensor* out);

 template <typename T, typename Context>

backends/npu/kernels/funcs/format_utils.cc

Lines changed: 13 additions & 13 deletions

@@ -36,19 +36,19 @@

 #include "kernels/funcs/string_helper.h"

-static std::map<paddle::experimental::DataType, aclDataType> //
+static std::map<phi::DataType, aclDataType> //
     DTYPE_2_ACL_DTYPE = {
-        {paddle::experimental::DataType::BOOL, ACL_BOOL},
-        {paddle::experimental::DataType::UINT8, ACL_UINT8},
-        {paddle::experimental::DataType::INT8, ACL_INT8},
-        {paddle::experimental::DataType::INT16, ACL_INT16},
-        {paddle::experimental::DataType::INT32, ACL_INT32},
-        {paddle::experimental::DataType::INT64, ACL_INT64},
-        {paddle::experimental::DataType::FLOAT16, ACL_FLOAT16},
-        {paddle::experimental::DataType::FLOAT32, ACL_FLOAT},
-        {paddle::experimental::DataType::FLOAT64, ACL_DOUBLE},
-        {paddle::experimental::DataType::COMPLEX64, ACL_COMPLEX64},
-        {paddle::experimental::DataType::COMPLEX128, ACL_COMPLEX128},
+        {phi::DataType::BOOL, ACL_BOOL},
+        {phi::DataType::UINT8, ACL_UINT8},
+        {phi::DataType::INT8, ACL_INT8},
+        {phi::DataType::INT16, ACL_INT16},
+        {phi::DataType::INT32, ACL_INT32},
+        {phi::DataType::INT64, ACL_INT64},
+        {phi::DataType::FLOAT16, ACL_FLOAT16},
+        {phi::DataType::FLOAT32, ACL_FLOAT},
+        {phi::DataType::FLOAT64, ACL_DOUBLE},
+        {phi::DataType::COMPLEX64, ACL_COMPLEX64},
+        {phi::DataType::COMPLEX128, ACL_COMPLEX128},
 };

 static std::map<phi::DataLayout, aclFormat> DATA_LAYOUT_2_ACL_FORMAT = {
@@ -59,7 +59,7 @@ static std::map<phi::DataLayout, aclFormat> DATA_LAYOUT_2_ACL_FORMAT = {
     {phi::DataLayout::ANY, ACL_FORMAT_ND},
 };

-aclDataType ConvertToNpuDtype(paddle::experimental::DataType dtype) {
+aclDataType ConvertToNpuDtype(phi::DataType dtype) {
   auto iter = DTYPE_2_ACL_DTYPE.find(dtype);
   PADDLE_ENFORCE_NE(
       iter,

backends/npu/kernels/funcs/format_utils.h

Lines changed: 1 addition & 1 deletion

@@ -39,7 +39,7 @@
 #include "paddle/extension.h"
 #include "paddle/phi/extension.h"

-aclDataType ConvertToNpuDtype(paddle::experimental::DataType dtype);
+aclDataType ConvertToNpuDtype(phi::DataType dtype);
 aclFormat ConvertToNpuFormat(phi::DataLayout layout);

 using FormatShape = std::vector<int64_t>;

backends/npu/kernels/funcs/npu_funcs.h

Lines changed: 1 addition & 1 deletion

@@ -86,7 +86,7 @@ inline void TensorCopy(const Context& dev_ctx,
   C_Stream stream = static_cast<C_Stream>(dev_ctx.stream());

   auto size = (src.dims().size() != 0 ? src.numel() : 1) *
-              paddle::experimental::SizeOf(src.dtype());
+              phi::SizeOf(src.dtype());
   if (UNLIKELY(size) == 0) {
     return;
   }

backends/npu/kernels/funcs/npu_op_prepare.h

Lines changed: 1 addition & 1 deletion

@@ -92,7 +92,7 @@ inline void AllocNPUTensor(const Context& dev_ctx,
                            phi::DenseTensor* tensor) {
   auto requested_size = PrepareTensorWithFormat(tensor, format);
   dev_ctx.template Alloc<T>(
-      tensor, requested_size * paddle::experimental::SizeOf(tensor->dtype()));
+      tensor, requested_size * phi::SizeOf(tensor->dtype()));
 }

 } // namespace custom_kernel

backends/npu/kernels/funcs/npu_op_runner.cc

Lines changed: 1 addition & 1 deletion

@@ -149,7 +149,7 @@ NpuOpRunner &NpuOpRunner::AddAttrDataType(const std::string &name,
   }
   VLOG(4) << "AddAttrDataType call";
   auto dtype = ConvertToNpuDtype(
-      static_cast<paddle::experimental::DataType>(paddle::get<int>(attr)));
+      static_cast<phi::DataType>(paddle::get<int>(attr)));
   PADDLE_ENFORCE_NPU_SUCCESS(aclopSetAttrDataType(attr_, name.c_str(), dtype));
   return *this;
 }

backends/npu/kernels/funcs/npu_op_runner.h

Lines changed: 7 additions & 7 deletions

@@ -102,8 +102,8 @@ class NpuOpRunner {
                     const std::vector<phi::DenseTensor> &,
                     const NPUAttributeMap &,
                     const phi::CustomContext &)> op_runner,
-      const std::vector<paddle::experimental::DataType> &input_type,
-      const std::vector<paddle::experimental::DataType> &output_type) {
+      const std::vector<phi::DataType> &input_type,
+      const std::vector<phi::DataType> &output_type) {
     std::function<void(const std::vector<phi::DenseTensor> &,
                        const std::vector<phi::DenseTensor> &,
                        const NPUAttributeMap &,
@@ -137,15 +137,15 @@
                     const NPUAttributeMap &,
                     const phi::CustomContext &,
                     const std::vector<std::vector<T>> &)> op_runner,
-      const std::vector<paddle::experimental::DataType> &input_type,
-      const std::vector<paddle::experimental::DataType> &output_type,
+      const std::vector<phi::DataType> &input_type,
+      const std::vector<phi::DataType> &output_type,
       const std::vector<std::vector<T>> &&host_vecs = {}) {
     std::vector<phi::DenseTensor> tmp_inputs(inputs.size());
     std::vector<phi::DenseTensor> tmp_outputs(outputs.size());

     for (size_t i = 0; i < input_type.size(); ++i) {
       bool cast_input =
-          (input_type[i] == paddle::experimental::DataType::UNDEFINED ||
+          (input_type[i] == phi::DataType::UNDEFINED ||
           input_type[i] != inputs[i].dtype());
       if (!cast_input) {
         tmp_inputs[i] = inputs[i];
@@ -163,7 +163,7 @@
     }
     for (size_t i = 0; i < output_type.size(); ++i) {
       bool cast_output =
-          (output_type[i] == paddle::experimental::DataType::UNDEFINED ||
+          (output_type[i] == phi::DataType::UNDEFINED ||
            output_type[i] != outputs[i].dtype());
       if (!cast_output) {
         tmp_outputs[i] = outputs[i];
@@ -177,7 +177,7 @@

     for (size_t i = 0; i < output_type.size(); ++i) {
       bool cast_output =
-          (output_type[i] == paddle::experimental::DataType::UNDEFINED ||
+          (output_type[i] == phi::DataType::UNDEFINED ||
            output_type[i] != outputs[i].dtype());
       if (cast_output) {
         const auto &cast_runner = NpuOpRunner(

backends/npu/kernels/funcs/slice_utils.h

Lines changed: 1 addition & 1 deletion

@@ -197,7 +197,7 @@ inline phi::DenseTensor Slice(const phi::DenseTensor& src,
   dst_dims[0] = end_idx - begin_idx;
   size_t dst_offset =
       meta.offset +
-      begin_idx * base * paddle::experimental::SizeOf(meta.dtype);
+      begin_idx * base * phi::SizeOf(meta.dtype);
   phi::DenseTensorMeta dst_meta = {
       meta.dtype, dst_dims, meta.layout, dst_offset};
   dst.set_meta(dst_meta);

backends/npu/kernels/index_sample_kernel.cc

Lines changed: 2 additions & 2 deletions

@@ -56,7 +56,7 @@ void IndexSampleKernel(const Context& dev_ctx,
   dev_ctx.template Alloc<T>(out);

   const auto& index_type = index.dtype();
-  if (index_type == phi::DenseTensorMeta::DataType::INT32) {
+  if (index_type == phi::DataType::INT32) {
     IndexSampleGather<int32_t, Context>(dev_ctx, &index, &x, out);
   } else {
     IndexSampleGather<int64_t, Context>(dev_ctx, &index, &x, out);
@@ -104,7 +104,7 @@ void IndexSampleGradKernel(const Context& dev_ctx,
   dev_ctx.template Alloc<T>(x_grad);

   const auto& index_type = index.dtype();
-  if (index_type == phi::DenseTensorMeta::DataType::INT32) {
+  if (index_type == phi::DataType::INT32) {
     IndexSampleGradScatter<int32_t, Context>(
         dev_ctx, &index, &out_grad, x_grad);
   } else {

backends/npu/kernels/masked_select_kernel.cc

Lines changed: 2 additions & 2 deletions

@@ -50,7 +50,7 @@ void MaskedSelectKernel(const Context& dev_ctx,
                   {mask_int32},
                   {{"dst_type",
                     static_cast<int32_t>(
-                        ConvertToNpuDtype(phi::DenseTensorMeta::DataType::INT32))}});
+                        ConvertToNpuDtype(phi::DataType::INT32))}});
   cast_runner.Run(stream);

   mask_int32.Resize({mask_int32.numel()});
@@ -103,7 +103,7 @@ void MaskedSelectGradKernel(const Context& dev_ctx,
                   {mask_int32},
                   {{"dst_type",
                     static_cast<int32_t>(
-                        ConvertToNpuDtype(phi::DenseTensorMeta::DataType::INT32))}});
+                        ConvertToNpuDtype(phi::DataType::INT32))}});
   cast_runner.Run(stream);

   mask_int32.Resize({mask_int32.numel()});

backends/npu/kernels/randperm_kernel.cc

Lines changed: 2 additions & 2 deletions

@@ -20,7 +20,7 @@ namespace custom_kernel {
 template <typename T, typename Context>
 void RandpermRawKernel(const Context& dev_ctx,
                        int n,
-                       phi::DenseTensorMeta::DataType dtype,
+                       phi::DataType dtype,
                        unsigned int seed,
                        phi::DenseTensor* out) {
   std::shared_ptr<std::mt19937_64> engine;
@@ -54,7 +54,7 @@ void RandpermRawKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void RandpermKernel(const Context& dev_ctx,
                     int n,
-                    phi::DenseTensorMeta::DataType dtype,
+                    phi::DataType dtype,
                     phi::DenseTensor* out) {
   custom_kernel::RandpermRawKernel<T, Context>(dev_ctx, n, dtype, 0, out);
 }

backends/npu/kernels/reduce_max_kernel.cc

Lines changed: 3 additions & 3 deletions

@@ -38,7 +38,7 @@ void MaxRawKernel(const Context& dev_ctx,
     attr_input = {{"axes", dim_vec}, {"keep_dims", keep_dim}};
   }

-  if (x.dtype() == phi::DenseTensorMeta::DataType::INT64) {
+  if (x.dtype() == phi::DataType::INT64) {
     auto op_func = [](const std::vector<phi::DenseTensor>& inputs,
                       const std::vector<phi::DenseTensor>& outputs,
                       const NPUAttributeMap& attrs,
@@ -53,8 +53,8 @@ void MaxRawKernel(const Context& dev_ctx,
                                       attr_input,
                                       dev_ctx,
                                       op_func,
-                                      {phi::DenseTensorMeta::DataType::INT32},
-                                      {phi::DenseTensorMeta::DataType::INT32});
+                                      {phi::DataType::INT32},
+                                      {phi::DataType::INT32});
   } else {
     const auto& runner = NpuOpRunner("ReduceMaxD", {x}, {*out}, attr_input);
     runner.Run(dev_ctx.stream());

backends/npu/kernels/reduce_min_kernel.cc

Lines changed: 3 additions & 3 deletions

@@ -38,7 +38,7 @@ void MinRawKernel(const Context& dev_ctx,
     attr_input = {{"axes", dim_vec}, {"keep_dims", keep_dim}};
   }

-  if (x.dtype() == phi::DenseTensorMeta::DataType::INT64) {
+  if (x.dtype() == phi::DataType::INT64) {
     auto op_func = [](const std::vector<phi::DenseTensor>& inputs,
                       const std::vector<phi::DenseTensor>& outputs,
                       const NPUAttributeMap& attrs,
@@ -53,8 +53,8 @@ void MinRawKernel(const Context& dev_ctx,
                                       attr_input,
                                       dev_ctx,
                                       op_func,
-                                      {phi::DenseTensorMeta::DataType::INT32},
-                                      {phi::DenseTensorMeta::DataType::INT32});
+                                      {phi::DataType::INT32},
+                                      {phi::DataType::INT32});
   } else {
     const auto& runner = NpuOpRunner("ReduceMinD", {x}, {*out}, attr_input);
     runner.Run(dev_ctx.stream());

backends/npu/kernels/reduce_prod_kernel.cc

Lines changed: 3 additions & 3 deletions

@@ -42,7 +42,7 @@ void ProdKernel(const Context& dev_ctx,
     attr_input = {{"axes", dim_vec}, {"keep_dims", keep_dim}};
   }

-  if (x.dtype() == phi::DenseTensorMeta::DataType::INT64) {
+  if (x.dtype() == phi::DataType::INT64) {
    auto op_func = [](const std::vector<phi::DenseTensor>& inputs,
                      const std::vector<phi::DenseTensor>& outputs,
                      const NPUAttributeMap& attrs,
@@ -57,8 +57,8 @@ void ProdKernel(const Context& dev_ctx,
                                       attr_input,
                                       dev_ctx,
                                       op_func,
-                                      {phi::DenseTensorMeta::DataType::INT32},
-                                      {phi::DenseTensorMeta::DataType::INT32});
+                                      {phi::DataType::INT32},
+                                      {phi::DataType::INT32});
   } else {
     // TODO(Aganlengzi): remove this branch when performance of ReduceProdD
     // is good enough for big shapes.

backends/npu/kernels/reduce_sum_kernel.cc

Lines changed: 4 additions & 4 deletions

@@ -74,7 +74,7 @@ void SumRawKernel(const Context& dev_ctx,
                   const phi::IntArray& axes,
                   bool keep_dim,
                   bool reduce_all,
-                  phi::DenseTensorMeta::DataType out_dtype,
+                  phi::DataType out_dtype,
                   phi::DenseTensor* out) {
   auto dims = axes.GetData();
   dev_ctx.template Alloc<T>(out);
@@ -141,7 +141,7 @@ template <typename T, typename Context>
 void SumKernel(const Context& dev_ctx,
                const phi::DenseTensor& x,
                const phi::IntArray& dims,
-               phi::DenseTensorMeta::DataType out_dtype,
+               phi::DataType out_dtype,
                bool keep_dim,
                phi::DenseTensor* out) {
   bool reduce_all = false;
@@ -220,7 +220,7 @@ PD_REGISTER_PLUGIN_KERNEL(sum_raw,
                           int64_t,
                           phi::dtype::float16,
                           float) {
-  kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
+  kernel->OutputAt(0).SetDataType(phi::DataType::UNDEFINED);
 }

 PD_REGISTER_PLUGIN_KERNEL(sum,
@@ -232,7 +232,7 @@ PD_REGISTER_PLUGIN_KERNEL(sum,
                           int64_t,
                           phi::dtype::float16,
                           float) {
-  kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
+  kernel->OutputAt(0).SetDataType(phi::DataType::UNDEFINED);
 }

 PD_REGISTER_PLUGIN_KERNEL(sum_grad,

backends/npu/kernels/rmsprop_kernel.cc

Lines changed: 1 addition & 1 deletion

@@ -52,7 +52,7 @@ void RmspropDenseKernel(const Context& dev_ctx,
   if (centered) {
     NPUAttributeMap attr_input = {{"use_locking", false}};

-    phi::DenseTensorMeta tmp_meta = {paddle::experimental::DataType::FLOAT32,
+    phi::DenseTensorMeta tmp_meta = {phi::DataType::FLOAT32,
                                      {1}};

     phi::DenseTensor rho_tmp;
