 #include <iostream>
 #include <optional>
 #include <variant>
+#include "paddle/common/macros.h"
 #include "paddle/phi/api/include/api.h"
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/common/bfloat16.h"
@@ -91,7 +92,7 @@ class ArrayRef {
 };
 using IntArrayRef = ArrayRef<int64_t>;

-enum class MemoryFormat : int8_t {
+enum class PADDLE_API MemoryFormat : int8_t {
   Contiguous,
   Preserve,
   ChannelsLast,
@@ -122,15 +123,15 @@ using BFloat16 = c10::BFloat16;
   _(uint16_t, UINT16, UInt16) \
   _(uint32_t, UINT32, UInt32)

-enum class ScalarType : int8_t {
+enum class PADDLE_API ScalarType : int8_t {
 #define DEFINE_ST_ENUM_VAL_(_1, _2, n) n,
   FORALL_PADDLE_AND_TORCH_DTYPES(DEFINE_ST_ENUM_VAL_)
 #undef DEFINE_ENUM_ST_ENUM_VAL_
   Undefined,
   NumOptions
 };

-struct TensorOptions {
+struct PADDLE_API TensorOptions {
   TensorOptions()
       : requires_grad_(false),
         pinned_memory_(false),
@@ -273,28 +274,18 @@ using Dtype = at::ScalarType;

 void compiling_test() {
   // Example usage of the Tensor class
-  std::cout << "111111";
   at::Tensor a = at::ones({2, 3}, at::TensorOptions());
-  std::cout << "222222";
   at::Tensor b = at::full({2, 3}, 1, at::ScalarType::Float);
-  std::cout << "333333";
   double c = 10;
   at::Tensor a_contig = a.contiguous();
-  std::cout << "444444";
   at::Tensor b_contig = b.contiguous();
-  std::cout << "555555";
   at::Tensor result = at::empty(a_contig.sizes(), a_contig.options());
-  std::cout << "666666";
   const float* a_ptr = a_contig.data_ptr<float>();
-  std::cout << "777777";
   const float* b_ptr = b_contig.data_ptr<float>();
-  std::cout << "888888";
   float* result_ptr = result.data_ptr<float>();
-  std::cout << "999999";
   for (int64_t i = 0; i < a_contig.numel(); i++) {
     result_ptr[i] = a_ptr[i] * b_ptr[i] + c;
   }
-  std::cout << "000000";
   // Show result
   for (int64_t i = 0; i < a_contig.numel(); i++) {
     std::cout << "Result[" << i << "] = " << a_ptr[i] * b_ptr[i] + c