[Tensor Operants & Prim-Relevant] Tensor supports logical operants #50983

Merged: 5 commits merged on Mar 1, 2023
Changes from 2 commits
4 changes: 4 additions & 0 deletions paddle/fluid/prim/api/api.yaml
@@ -2,6 +2,10 @@
- subtract
- multiply
- divide
- bitwise_and
- bitwise_not
- bitwise_or
- bitwise_xor
- unsqueeze
- exp
- scale
12 changes: 12 additions & 0 deletions paddle/phi/api/include/tensor.h
@@ -558,6 +558,14 @@ class PADDLE_API Tensor final {

Tensor operator-() const;

Tensor operator~() const;

Tensor operator&(const Tensor& other) const;

Tensor operator|(const Tensor& other) const;

Tensor operator^(const Tensor& other) const;

/* Part 8: Autograd methods */

/**
@@ -677,6 +685,10 @@ class PADDLE_API Tensor final {
Tensor divide(const Scalar& y) const;
Tensor multiply(const Scalar& y) const;
Tensor subtract(const Scalar& y) const;
Tensor bitwise_and(const Tensor& y) const;
Tensor bitwise_or(const Tensor& y) const;
Tensor bitwise_xor(const Tensor& y) const;
Tensor bitwise_not() const;
Tensor pow(const Tensor& y) const;
Tensor pow(const Scalar& y) const;

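As a rough usage sketch (not part of this patch), the new overloads let C++ code apply bitwise expressions to paddle::Tensor with operator syntax; the helper below is hypothetical, and each operator simply forwards to the corresponding bitwise_* member declared above.

#include "paddle/phi/api/include/tensor.h"

// Hypothetical helper, for illustration only: operator&, operator|, operator^
// and operator~ forward to bitwise_and, bitwise_or, bitwise_xor and bitwise_not.
paddle::Tensor CombineMasks(const paddle::Tensor& a, const paddle::Tensor& b) {
  return (a & b) | (a ^ ~b);
}
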
17 changes: 17 additions & 0 deletions paddle/phi/api/yaml/generator/tensor_operants_gen.py
@@ -29,6 +29,7 @@

indent = " "

# E.g.: Prim uses `elementwise_pow + fill_constant` to replace `pow`, so we use this map to generate the `pow` signature when iterating over the `elementwise_pow` API.
specific_ops_map = {"elementwise_pow": "pow"}


@@ -149,6 +150,22 @@ class TensorOperantsBase {
return scale(-1.0, 0.0, true);
}

Tensor Tensor::operator~() const {
return bitwise_not();
}

Tensor Tensor::operator&(const Tensor &other) const {
return bitwise_and(other);
}

Tensor Tensor::operator|(const Tensor &other) const {
return bitwise_or(other);
}

Tensor Tensor::operator^(const Tensor &other) const {
return bitwise_xor(other);
}

Tensor Tensor::pow(const Tensor& y) const {
return paddle::OperantsManager::Instance().pow(static_cast<const Tensor &>(*this), y);
}
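The operator overloads above delegate to the bitwise_* members, which this generator is expected to emit with the same OperantsManager dispatch used for pow; a sketch of the presumed generated form (not shown in this diff):

// Presumed generated code (assumption): each bitwise_* member dispatches
// through the OperantsManager singleton, mirroring the pow member above.
Tensor Tensor::bitwise_and(const Tensor& y) const {
  return paddle::OperantsManager::Instance().bitwise_and(static_cast<const Tensor &>(*this), y);
}

Tensor Tensor::bitwise_not() const {
  return paddle::OperantsManager::Instance().bitwise_not(static_cast<const Tensor &>(*this));
}
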
5 changes: 5 additions & 0 deletions paddle/phi/api/yaml/tensor_operants.yaml
@@ -1,8 +1,13 @@
# Attach operants to Tensor; this file should be consistent with the declarations in `tensor.h`
# Ensure this file is a subset of `paddle/fluid/prim/api/api.yaml`
- add
- subtract
- multiply
- divide
- bitwise_and
- bitwise_not
- bitwise_or
- bitwise_xor
- unsqueeze
- exp
- scale
183 changes: 183 additions & 0 deletions python/paddle/fluid/tests/custom_op/custom_tensor_operator.cc
@@ -79,6 +79,37 @@ PD_BUILD_GRAD_OP(custom_scalar_add)
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(ScalarAddBackward));

// y = 1 + x
std::vector<paddle::Tensor> LeftScalarAddForward(const paddle::Tensor& x) {
if (x.is_cpu() || x.is_gpu()) {
return {1 + x};
} else {
PD_THROW("Not implemented.");
}
}

// dy / dx = 1 * grad_out
std::vector<paddle::Tensor> LeftScalarAddBackward(
const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
if (x.is_cpu() || x.is_gpu()) {
return {1 * grad_out};
} else {
PD_THROW("Not implemented.");
}
}

PD_BUILD_OP(custom_left_scalar_add)
.Inputs({"X"})
.Outputs({"Out"})
.SetKernelFn(PD_KERNEL(LeftScalarAddForward));

PD_BUILD_GRAD_OP(custom_left_scalar_add)
.Inputs({"X", "Out", paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(LeftScalarAddBackward));

// y = x - 1
std::vector<paddle::Tensor> SubtractForward(const paddle::Tensor& x) {
if (x.is_cpu() || x.is_gpu()) {
@@ -141,6 +172,37 @@ PD_BUILD_GRAD_OP(custom_scalar_subtract)
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(ScalarSubtractBackward));

// y = -1 + x
std::vector<paddle::Tensor> LeftScalarSubtractForward(const paddle::Tensor& x) {
if (x.is_cpu() || x.is_gpu()) {
return {-1 + x};
} else {
PD_THROW("Not implemented.");
}
}

// dy / dx = 1 * grad_out
std::vector<paddle::Tensor> LeftScalarSubtractBackward(
const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
if (x.is_cpu() || x.is_gpu()) {
return {1 * grad_out};
} else {
PD_THROW("Not implemented.");
}
}

PD_BUILD_OP(custom_left_scalar_subtract)
.Inputs({"X"})
.Outputs({"Out"})
.SetKernelFn(PD_KERNEL(LeftScalarSubtractForward));

PD_BUILD_GRAD_OP(custom_left_scalar_subtract)
.Inputs({"X", "Out", paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(LeftScalarSubtractBackward));

// y = x * 5
std::vector<paddle::Tensor> MultiplyForward(const paddle::Tensor& x) {
if (x.is_cpu() || x.is_gpu()) {
@@ -206,6 +268,37 @@ PD_BUILD_GRAD_OP(custom_scalar_multiply)
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(ScalarMultiplyBackward));

// y = 5 * x
std::vector<paddle::Tensor> LeftScalarMultiplyForward(const paddle::Tensor& x) {
if (x.is_cpu() || x.is_gpu()) {
return {5 * x};
} else {
PD_THROW("Not implemented.");
}
}

// dy / dx = 5 * grad_out
std::vector<paddle::Tensor> LeftScalarMultiplyBackward(
const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
if (x.is_cpu() || x.is_gpu()) {
return {5 * grad_out};
} else {
PD_THROW("Not implemented.");
}
}

PD_BUILD_OP(custom_left_scalar_multiply)
.Inputs({"X"})
.Outputs({"Out"})
.SetKernelFn(PD_KERNEL(LeftScalarMultiplyForward));

PD_BUILD_GRAD_OP(custom_left_scalar_multiply)
.Inputs({"X", "Out", paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(LeftScalarMultiplyBackward));

// y = 1 / x
std::vector<paddle::Tensor> DivideForward(const paddle::Tensor& x) {
if (x.is_cpu() || x.is_gpu()) {
@@ -270,3 +363,93 @@ PD_BUILD_GRAD_OP(custom_scalar_divide)
.Inputs({"X", "Out", paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(ScalarDivideBackward));

// y = 1 / x
std::vector<paddle::Tensor> LeftScalarDivideForward(const paddle::Tensor& x) {
if (x.is_cpu() || x.is_gpu()) {
return {1 / x};
} else {
PD_THROW("Not implemented.");
}
}

// dy / dx = -grad_out / (x * x)
std::vector<paddle::Tensor> LeftScalarDivideBackward(
const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
if (x.is_cpu() || x.is_gpu()) {
return {-grad_out / (x * x)};
} else {
PD_THROW("Not implemented.");
}
}

PD_BUILD_OP(custom_left_scalar_divide)
.Inputs({"X"})
.Outputs({"Out"})
.SetKernelFn(PD_KERNEL(LeftScalarDivideForward));

PD_BUILD_GRAD_OP(custom_left_scalar_divide)
.Inputs({"X", "Out", paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(LeftScalarDivideBackward));

// out = x & y
std::vector<paddle::Tensor> AndForward(const paddle::Tensor& x,
const paddle::Tensor& y) {
if (x.is_cpu() || x.is_gpu()) {
return {x & y};
} else {
PD_THROW("Not implemented.");
}
}

PD_BUILD_OP(custom_logical_and)
.Inputs({"X", "Y"})
.Outputs({"Out"})
.SetKernelFn(PD_KERNEL(AndForward));

// out = x | y
std::vector<paddle::Tensor> OrForward(const paddle::Tensor& x,
const paddle::Tensor& y) {
if (x.is_cpu() || x.is_gpu()) {
return {x | y};
} else {
PD_THROW("Not implemented.");
}
}

PD_BUILD_OP(custom_logical_or)
.Inputs({"X", "Y"})
.Outputs({"Out"})
.SetKernelFn(PD_KERNEL(OrForward));

// out = x ^ y
std::vector<paddle::Tensor> XorForward(const paddle::Tensor& x,
const paddle::Tensor& y) {
if (x.is_cpu() || x.is_gpu()) {
return {x ^ y};
} else {
PD_THROW("Not implemented.");
}
}

PD_BUILD_OP(custom_logical_xor)
.Inputs({"X", "Y"})
.Outputs({"Out"})
.SetKernelFn(PD_KERNEL(XorForward));

// out = ~x
std::vector<paddle::Tensor> NotForward(const paddle::Tensor& x) {
if (x.is_cpu() || x.is_gpu()) {
return {~x};
} else {
PD_THROW("Not implemented.");
}
}

PD_BUILD_OP(custom_logical_not)
.Inputs({"X"})
.Outputs({"Out"})
.SetKernelFn(PD_KERNEL(NotForward));
31 changes: 31 additions & 0 deletions python/paddle/fluid/tests/custom_op/test_custom_tensor_operator.py
@@ -240,6 +240,13 @@ def test_all(self):
self.divide = self.custom_module.custom_scalar_divide
self._test_static()
self._test_dynamic()
self.add = self.custom_module.custom_left_scalar_add
self.subtract = self.custom_module.custom_left_scalar_subtract
self.multiply = self.custom_module.custom_left_scalar_multiply
self.divide = self.custom_module.custom_left_scalar_divide
self._test_static()
self._test_dynamic()
self._test_logical_operants()

def _test_static(self):
for device in self.devices:
@@ -324,6 +331,30 @@ def _test_dynamic(self):
)
np.testing.assert_allclose(out, pd_out, rtol=1e-5, atol=1e-8)

def _test_logical_operants(self):
for device in self.devices:
paddle.set_device(device)
np_x = paddle.randint(0, 2, [4, 8])
x = paddle.to_tensor(np_x, dtype="int32")
np_y = paddle.randint(0, 2, [4, 8])
y = paddle.to_tensor(np_y, dtype="int32")

out = self.custom_module.custom_logical_and(x, y)
pd_out = paddle.bitwise_and(x, y)
np.testing.assert_equal(out.numpy(), pd_out.numpy())

out = self.custom_module.custom_logical_or(x, y)
pd_out = paddle.bitwise_or(x, y)
np.testing.assert_equal(out.numpy(), pd_out.numpy())

out = self.custom_module.custom_logical_xor(x, y)
pd_out = paddle.bitwise_xor(x, y)
np.testing.assert_equal(out.numpy(), pd_out.numpy())

out = self.custom_module.custom_logical_not(x)
pd_out = paddle.bitwise_not(x)
np.testing.assert_equal(out.numpy(), pd_out.numpy())


if __name__ == '__main__':
unittest.main()