@@ -35,13 +35,22 @@ PD_DECLARE_KERNEL(tanh_grad, CPU, ALL_LAYOUT);
35
35
// Kernel declarations required by the tests below. CPU variants are always
// needed; the GPU/KPS variants are only declared when building with CUDA/HIP.
// The bitwise_* kernels back the logical operator overloads (&, |, ^, ~)
// exercised by LogicalOperantsTest.
PD_DECLARE_KERNEL(pow, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(multiply, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_and, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_or, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_xor, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_not, CPU, ALL_LAYOUT);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(full, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(tanh, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(tanh_grad, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(pow, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(scale, GPU, ALL_LAYOUT);
// NOTE(review): multiply and the bitwise kernels register under the KPS
// backend (which covers GPU) rather than GPU directly — confirm against the
// phi kernel registry.
PD_DECLARE_KERNEL(multiply, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_and, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_or, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_xor, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_not, KPS, ALL_LAYOUT);

#endif
46
55
47
56
namespace paddle {
@@ -81,7 +90,7 @@ TEST(EagerPrim, TanhBackwardTest) {
81
90
82
91
paddle::experimental::Tensor out1 = tanh_ad_func(tensor1);
83
92
std::vector<paddle::experimental::Tensor> outs1 = {out1};
84
- // Disable prim
93
+ // Enable prim
85
94
PrimCommonUtils::SetBwdPrimEnabled (true );
86
95
ASSERT_TRUE (PrimCommonUtils::IsBwdPrimEnabled ());
87
96
// 4. Run Backward
@@ -104,6 +113,44 @@ TEST(EagerPrim, TanhBackwardTest) {
104
113
->data<float>()[0]);
105
114
}
106
115
116
+ TEST (EagerPrim, LogicalOperantsTest) {
117
+ // 1. Initialized
118
+ eager_test::InitEnv (paddle::platform::CPUPlace ());
119
+ FLAGS_tensor_operants_mode = " eager" ;
120
+ paddle::prim::InitTensorOperants ();
121
+ // 2. pre
122
+ paddle::framework::DDim ddim = phi::make_ddim ({4 , 16 , 16 , 32 });
123
+ paddle::experimental::Tensor tensor0 =
124
+ ::egr::egr_utils_api::CreateTensorWithValue (ddim,
125
+ paddle::platform::CPUPlace (),
126
+ phi::DataType::INT32,
127
+ phi::DataLayout::NCHW,
128
+ 1 /* value*/ ,
129
+ true /* is_leaf*/ );
130
+ ::egr::egr_utils_api::RetainGradForTensor (tensor0);
131
+ paddle::experimental::Tensor tensor1 =
132
+ ::egr::egr_utils_api::CreateTensorWithValue (ddim,
133
+ paddle::platform::CPUPlace (),
134
+ phi::DataType::INT32,
135
+ phi::DataLayout::NCHW,
136
+ 0 /* value*/ ,
137
+ true /* is_leaf*/ );
138
+ ::egr::egr_utils_api::RetainGradForTensor (tensor1);
139
+ // 3. Run Forward once
140
+ paddle::experimental::Tensor out0 = tensor0 & tensor1;
141
+ paddle::experimental::Tensor out1 = bitwise_and_ad_func(tensor0, tensor1);
142
+ EXPECT_EQ (out0.data<int >()[0], out1.data<int>()[0]);
143
+ out0 = tensor0 | tensor1;
144
+ out1 = bitwise_or_ad_func(tensor0, tensor1);
145
+ EXPECT_EQ (out0.data <int >()[0 ], out1.data <int >()[0 ]);
146
+ out0 = tensor0 ^ tensor1;
147
+ out1 = bitwise_xor_ad_func (tensor0, tensor1);
148
+ EXPECT_EQ (out0.data <int >()[0 ], out1.data <int >()[0 ]);
149
+ out0 = ~tensor0;
150
+ out1 = bitwise_not_ad_func (tensor0);
151
+ EXPECT_EQ (out0.data <int >()[0 ], out1.data <int >()[0 ]);
152
+ }
153
+
107
154
TEST (EagerPrim, TestFlags) {
108
155
PrimCommonUtils::SetBwdPrimEnabled (true );
109
156
ASSERT_TRUE (PrimCommonUtils::IsBwdPrimEnabled ());
0 commit comments