Skip to content

Commit d64a8f1

Browse files
committed
add prim dynamic unit test
1 parent 9653144 commit d64a8f1

File tree

1 file changed

+48
-1
lines changed

1 file changed

+48
-1
lines changed

paddle/fluid/prim/tests/test_eager_prim.cc

+48-1
Original file line numberDiff line numberDiff line change
@@ -35,13 +35,22 @@ PD_DECLARE_KERNEL(tanh_grad, CPU, ALL_LAYOUT);
3535
PD_DECLARE_KERNEL(pow, CPU, ALL_LAYOUT);
3636
PD_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT);
3737
PD_DECLARE_KERNEL(multiply, CPU, ALL_LAYOUT);
38+
PD_DECLARE_KERNEL(bitwise_and, CPU, ALL_LAYOUT);
39+
PD_DECLARE_KERNEL(bitwise_or, CPU, ALL_LAYOUT);
40+
PD_DECLARE_KERNEL(bitwise_xor, CPU, ALL_LAYOUT);
41+
PD_DECLARE_KERNEL(bitwise_not, CPU, ALL_LAYOUT);
3842
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
3943
PD_DECLARE_KERNEL(full, GPU, ALL_LAYOUT);
4044
PD_DECLARE_KERNEL(tanh, GPU, ALL_LAYOUT);
4145
PD_DECLARE_KERNEL(tanh_grad, GPU, ALL_LAYOUT);
4246
PD_DECLARE_KERNEL(pow, GPU, ALL_LAYOUT);
4347
PD_DECLARE_KERNEL(scale, GPU, ALL_LAYOUT);
4448
PD_DECLARE_KERNEL(multiply, KPS, ALL_LAYOUT);
49+
PD_DECLARE_KERNEL(bitwise_and, KPS, ALL_LAYOUT);
50+
PD_DECLARE_KERNEL(bitwise_or, KPS, ALL_LAYOUT);
51+
PD_DECLARE_KERNEL(bitwise_xor, KPS, ALL_LAYOUT);
52+
PD_DECLARE_KERNEL(bitwise_not, KPS, ALL_LAYOUT);
53+
4554
#endif
4655

4756
namespace paddle {
@@ -81,7 +90,7 @@ TEST(EagerPrim, TanhBackwardTest) {
8190

8291
paddle::experimental::Tensor out1 = tanh_ad_func(tensor1);
8392
std::vector<paddle::experimental::Tensor> outs1 = {out1};
84-
// Disable prim
93+
// Enable prim
8594
PrimCommonUtils::SetBwdPrimEnabled(true);
8695
ASSERT_TRUE(PrimCommonUtils::IsBwdPrimEnabled());
8796
// 4. Run Backward
@@ -104,6 +113,44 @@ TEST(EagerPrim, TanhBackwardTest) {
104113
->data<float>()[0]);
105114
}
106115

116+
TEST(EagerPrim, LogicalOperantsTest) {
  // 1. Initialize the eager environment and route tensor operants through
  //    the eager (prim) implementation.
  eager_test::InitEnv(paddle::platform::CPUPlace());
  FLAGS_tensor_operants_mode = "eager";
  paddle::prim::InitTensorOperants();
  // 2. Prepare two INT32 leaf tensors of identical shape: one filled with
  //    1s, one filled with 0s.
  paddle::framework::DDim dims = phi::make_ddim({4, 16, 16, 32});
  paddle::experimental::Tensor ones_tensor =
      ::egr::egr_utils_api::CreateTensorWithValue(dims,
                                                  paddle::platform::CPUPlace(),
                                                  phi::DataType::INT32,
                                                  phi::DataLayout::NCHW,
                                                  1 /*value*/,
                                                  true /*is_leaf*/);
  ::egr::egr_utils_api::RetainGradForTensor(ones_tensor);
  paddle::experimental::Tensor zeros_tensor =
      ::egr::egr_utils_api::CreateTensorWithValue(dims,
                                                  paddle::platform::CPUPlace(),
                                                  phi::DataType::INT32,
                                                  phi::DataLayout::NCHW,
                                                  0 /*value*/,
                                                  true /*is_leaf*/);
  ::egr::egr_utils_api::RetainGradForTensor(zeros_tensor);
  // 3. Run the forward pass once: every overloaded bitwise operator must
  //    produce the same result as the corresponding *_ad_func. The tensors
  //    are uniformly filled, so element 0 is representative.
  auto expect_same_first_elem =
      [](const paddle::experimental::Tensor& lhs,
         const paddle::experimental::Tensor& rhs) {
        EXPECT_EQ(lhs.data<int>()[0], rhs.data<int>()[0]);
      };
  expect_same_first_elem(ones_tensor & zeros_tensor,
                         bitwise_and_ad_func(ones_tensor, zeros_tensor));
  expect_same_first_elem(ones_tensor | zeros_tensor,
                         bitwise_or_ad_func(ones_tensor, zeros_tensor));
  expect_same_first_elem(ones_tensor ^ zeros_tensor,
                         bitwise_xor_ad_func(ones_tensor, zeros_tensor));
  expect_same_first_elem(~ones_tensor, bitwise_not_ad_func(ones_tensor));
}
153+
107154
TEST(EagerPrim, TestFlags) {
108155
PrimCommonUtils::SetBwdPrimEnabled(true);
109156
ASSERT_TRUE(PrimCommonUtils::IsBwdPrimEnabled());

0 commit comments

Comments
 (0)