【PIR API adaptor No.15-17】 Migrate RandomHorizontalFlip, RandomVerticalFlip, RandomErasing into pir #65499

Merged: 24 commits, Jul 19, 2024
30 changes: 14 additions & 16 deletions python/paddle/static/nn/control_flow.py
@@ -1420,22 +1420,20 @@ def get_expected_precision(out_with_blocks):
         return new_outs

     if all(isinstance(out, paddle.pir.Value) for out in outs):
-        if in_pir_mode():
-            amp_attrs = core._get_amp_attrs()
-            amp_level = amp_attrs._amp_level
-            apply_amp_level_list = [
-                core.AmpLevel.O0,
-                core.AmpLevel.O1,
-                core.AmpLevel.O2,
-            ]
-            if (amp_level in apply_amp_level_list) and (
-                not all_has_same_dtype(outs)
-            ):
-                warnings.warn(
-                    f"Return results from different branches in cond has different type: true value is '{outs[0]}' and false value is '{outs[1]}', "
-                    "so we will promote the lower precision to the higher one."
-                )
-                return promote_precision(out_with_blocks)
+        amp_attrs = core._get_amp_attrs()
+        amp_level = amp_attrs._amp_level
+        apply_amp_level_list = [
+            core.AmpLevel.O1,
+            core.AmpLevel.O2,
+        ]
+        if (amp_level in apply_amp_level_list) and (
+            not all_has_same_dtype(outs)
+        ):
+            warnings.warn(
+                f"Return results from different branches in cond has different dtype: true value dtype is '{outs[0].dtype}' and false value dtype is '{outs[1].dtype}', "
+                "so we will promote the lower precision to the higher one."
+            )
+            return promote_precision(out_with_blocks)
         return outs

     if all(arg is None for arg in outs):
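For context, the promotion above only fires when the branch outputs disagree on dtype, which is what the `all_has_same_dtype` helper checks. A minimal sketch of that check, assuming `outs` is a flat list of values (the real helper in Paddle may handle nested structures):

```python
def all_has_same_dtype(outs):
    # Promotion is only needed when the branch outputs carry more than one
    # distinct dtype; a single shared dtype means nothing to reconcile.
    return len({out.dtype for out in outs}) <= 1
```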
2 changes: 1 addition & 1 deletion python/paddle/tensor/linalg.py
@@ -5187,7 +5187,7 @@ def cond(i, _):
        return paddle.static.nn.cond(
            is_finite,
            lambda: paddle.less_than(i, max_squaring),
-            lambda: paddle.full((), False),
+            lambda: paddle.full((), False, dtype=paddle.bool),
        )

    def body(i, result):
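This one-argument fix matters because the two branches of a static `cond` must return the same dtype: `paddle.less_than` yields a bool tensor, while `paddle.full` does not infer bool from the Python `False` here (falling back to float32 when no dtype is given, an assumption about the default at the time of this PR). A quick illustration:

```python
import paddle

# The true branch of the cond returns a bool tensor.
flag = paddle.less_than(paddle.to_tensor(1), paddle.to_tensor(2))
print(flag.dtype)  # paddle.bool

# Without an explicit dtype the false branch would not match it
# (assumed default dtype for full(): float32).
print(paddle.full((), False).dtype)                     # paddle.float32
print(paddle.full((), False, dtype=paddle.bool).dtype)  # paddle.bool
```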
3 changes: 2 additions & 1 deletion python/paddle/vision/transforms/functional.py
@@ -25,6 +25,7 @@
 from paddle._typing import unreached

 from ...base.framework import Variable
+from ...base.libpaddle.pir import Value
 from . import (
     functional_cv2 as F_cv2,
     functional_pil as F_pil,
@@ -64,7 +65,7 @@ def _is_tensor_image(img: _ImageDataType) -> TypeGuard[Tensor]:
     """
     Return True if img is a Tensor for dynamic mode or Variable for static graph mode.
     """
-    return isinstance(img, (paddle.Tensor, Variable))
+    return isinstance(img, (paddle.Tensor, Variable, Value))


 def _is_numpy_image(img: _ImageDataType) -> TypeGuard[npt.NDArray[Any]]:
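`_is_tensor_image` gates which backend a transform dispatches to, so under pir a `Value` must pass the check or tensor inputs would fall through to the PIL/cv2 paths. A usage sketch of that dispatch in eager mode (`hflip` chosen arbitrarily; the ndarray path assumes OpenCV is installed):

```python
import numpy as np
import paddle
from paddle.vision.transforms import functional as F

tensor_img = paddle.rand([3, 8, 8])                    # CHW tensor image
numpy_img = np.random.rand(8, 8, 3).astype("float32")  # HWC ndarray image

out_t = F.hflip(tensor_img)  # passes _is_tensor_image -> tensor backend
out_n = F.hflip(numpy_img)   # passes _is_numpy_image  -> cv2 backend
```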
3 changes: 2 additions & 1 deletion python/paddle/vision/transforms/functional_tensor.py
@@ -21,13 +21,14 @@
 import paddle.nn.functional as F

 from ...base.framework import Variable
+from ...base.libpaddle.pir import Value

 __all__ = []


 def _assert_image_tensor(img, data_format):
     if (
-        not isinstance(img, (paddle.Tensor, Variable))
+        not isinstance(img, (paddle.Tensor, Variable, Value))
         or img.ndim < 3
         or img.ndim > 4
         or data_format.lower() not in ('chw', 'hwc')
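The same widening is needed here because pir graph construction hands the transforms a `paddle.pir.Value` rather than a legacy `Variable`. A small demonstration of where such a value comes from (assuming the interpreter runs with pir as the default static IR; otherwise `paddle.static.data` still returns a `Variable`):

```python
import paddle

paddle.enable_static()
# Under pir, static placeholders are Values, so isinstance checks listing
# only (paddle.Tensor, Variable) would reject perfectly valid inputs.
img = paddle.static.data(name="img", shape=[3, 64, 64], dtype="float32")
print(type(img))  # paddle.base.libpaddle.pir.Value when pir is enabled
```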
47 changes: 36 additions & 11 deletions python/paddle/vision/transforms/transforms.py
@@ -619,7 +619,13 @@ def _static_get_param(self, image, attempts=10):
        w = paddle.ones([1], dtype="int32") * (width + 1)

        def cond(counter, ten, i, j, h, w):
-            return (counter < ten) and (w > width or h > height)
+            return paddle.logical_and(
+                counter < ten,
+                paddle.logical_or(
+                    w > width,
+                    h > height,
+                ),
+            )

        def body(counter, ten, i, j, h, w):
            target_area = (
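This rewrite is the heart of the migration: Python's `and`/`or` force an immediate `bool()` on their operands, and a symbolic pir `Value` has no concrete truth value at graph-construction time, so the predicate must be expressed with `paddle.logical_and`/`paddle.logical_or` ops that stay inside the graph. A minimal sketch of the failure mode, under the assumption that `bool()` on a symbolic value raises during tracing:

```python
import paddle

paddle.enable_static()
w = paddle.static.data(name="w", shape=[1], dtype="int32")

# `(w > 5) and (w < 10)` would call bool() on the symbolic result of
# `w > 5` at trace time; logical_and records the test as a graph op instead.
pred = paddle.logical_and(w > 5, w < 10)
```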
@@ -638,15 +644,21 @@ def body(counter, ten, i, j, h, w):
            )

            i = paddle.static.nn.cond(
-                0 < w <= width and 0 < h <= height,
+                paddle.logical_and(
+                    paddle.logical_and(0 < h, h <= height),
+                    paddle.logical_and(0 < w, w <= width),
+                ),
                lambda: paddle.uniform(shape=[1], min=0, max=height - h).astype(
                    "int32"
                ),
                lambda: i,
            )

            j = paddle.static.nn.cond(
-                0 < w <= width and 0 < h <= height,
+                paddle.logical_and(
+                    paddle.logical_and(0 < h, h <= height),
+                    paddle.logical_and(0 < w, w <= width),
+                ),
                lambda: paddle.uniform(shape=[1], min=0, max=width - w).astype(
                    "int32"
                ),
@@ -677,7 +689,7 @@ def central_crop(width, height):
                lambda: paddle.static.nn.cond(
                    in_ratio > self.ratio[1],
                    lambda: [
-                        paddle.round(height * self.ratio[1]),
+                        paddle.round(height * self.ratio[1]).astype("int32"),
                        height.astype("int32"),
                    ],
                    lambda: [width.astype("int32"), height.astype("int32")],
@@ -689,7 +701,10 @@
            return i, j, h, w, counter

        return paddle.static.nn.cond(
-            0 < w <= width and 0 < h <= height,
+            paddle.logical_and(
+                paddle.logical_and(0 < h, h <= height),
+                paddle.logical_and(0 < w, w <= width),
+            ),
            lambda: [i, j, h, w, counter],
            lambda: central_crop(width, height),
        )
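The chained form `0 < w <= width` looks atomic but desugars to `(0 < w) and (w <= width)`, so it trips over the same `bool()` restriction; the diff therefore unrolls each chain into one-sided comparisons joined by `paddle.logical_and`. A sketch of the equivalence with int32 scalar tensors, mirroring the surrounding code:

```python
import paddle

paddle.enable_static()
w = paddle.static.data(name="w", shape=[1], dtype="int32")
width = paddle.static.data(name="width", shape=[1], dtype="int32")

# 0 < w <= width  ==  (0 < w) and (w <= width); the graph-safe spelling
# keeps both halves as ops and combines them explicitly.
in_range = paddle.logical_and(0 < w, w <= width)
```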
@@ -2188,7 +2203,13 @@ def _static_get_param(self, img, scale, ratio, value):
        log_ratio = np.log(np.array(ratio))

        def cond(counter, ten, erase_h, erase_w):
-            return counter < ten and (erase_h >= h or erase_w >= w)
+            return paddle.logical_and(
+                counter < ten,
+                paddle.logical_or(
+                    erase_h >= h,
+                    erase_w >= w,
+                ),
+            )

        def body(counter, ten, erase_h, erase_w):
            erase_area = (
@@ -2228,31 +2249,35 @@ def body(counter, ten, erase_h, erase_w):

            zero = paddle.zeros([1]).astype("int32")
            top = paddle.static.nn.cond(
-                erase_h < h and erase_w < w,
+                paddle.logical_and(erase_h < h, erase_w < w),
                lambda: paddle.uniform(
                    shape=[1], min=0, max=h - erase_h + 1
                ).astype("int32"),
                lambda: zero,
            )

            left = paddle.static.nn.cond(
-                erase_h < h and erase_w < w,
+                paddle.logical_and(erase_h < h, erase_w < w),
                lambda: paddle.uniform(
                    shape=[1], min=0, max=w - erase_w + 1
                ).astype("int32"),
                lambda: zero,
            )

            erase_h = paddle.static.nn.cond(
-                erase_h < h and erase_w < w, lambda: erase_h, lambda: h
+                paddle.logical_and(erase_h < h, erase_w < w),
+                lambda: erase_h,
+                lambda: h,
            )

            erase_w = paddle.static.nn.cond(
-                erase_h < h and erase_w < w, lambda: erase_w, lambda: w
+                paddle.logical_and(erase_h < h, erase_w < w),
+                lambda: erase_w,
+                lambda: w,
            )

            v = paddle.static.nn.cond(
-                erase_h < h and erase_w < w, lambda: v, lambda: img
+                paddle.logical_and(erase_h < h, erase_w < w), lambda: v, lambda: img
            )

            return top, left, erase_h, erase_w, v, counter
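One further pir constraint appears in the `central_crop` hunk above: the two branches of a static `cond` must also agree on dtype, and `paddle.round` preserves its floating-point input dtype, so the rounded value needs an explicit cast to line up with the `astype("int32")` results in the other branch. A toy illustration of the dtypes involved:

```python
import paddle

h = paddle.to_tensor([7.6])
print(paddle.round(h).dtype)                  # paddle.float32 -- round keeps dtype
print(paddle.round(h).astype("int32").dtype)  # paddle.int32 -- matches the branch
```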
21 changes: 10 additions & 11 deletions test/legacy_test/test_transforms_static.py
@@ -28,6 +28,13 @@ def setUp(self):
            np.float32
        )
        self.set_trans_api()
+        self.init_dy_res()
+
+    def init_dy_res(self):
+        # Obtain the dynamic transform result first before test_transform.
+        self.dy_res = self.dynamic_transform()
+        if isinstance(self.dy_res, paddle.Tensor):
+            self.dy_res = self.dy_res.numpy()

    def get_shape(self):
        return (3, 64, 64)
@@ -59,12 +66,8 @@ def static_transform(self):
        return res[0]

    def test_transform(self):
-        dy_res = self.dynamic_transform()
-        if isinstance(dy_res, paddle.Tensor):
-            dy_res = dy_res.numpy()
        st_res = self.static_transform()
-
-        np.testing.assert_almost_equal(dy_res, st_res)
+        np.testing.assert_almost_equal(self.dy_res, st_res)


class TestResize(TestTransformUnitTestBase):
@@ -138,10 +141,9 @@ def assert_test_random_equal(self, res, eps=1e-4):
        assert not res_assert

    def test_transform(self):
-        dy_res = self.dynamic_transform().numpy()
        st_res = self.static_transform()

-        self.assert_test_random_equal(dy_res)
+        self.assert_test_random_equal(self.dy_res)
        self.assert_test_random_equal(st_res)

@@ -180,12 +182,9 @@ def set_trans_api(self):
        )

    def test_transform(self):
-        dy_res = self.dynamic_transform()
-        if isinstance(dy_res, paddle.Tensor):
-            dy_res = dy_res.numpy()
        st_res = self.static_transform()

-        self.assert_test_erasing(dy_res)
+        self.assert_test_erasing(self.dy_res)
        self.assert_test_erasing(st_res)

    def assert_test_erasing(self, arr):
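The test refactor moves the dynamic-mode reference into `setUp` so it is captured before anything switches the process into static mode; once `paddle.enable_static()` has run inside `static_transform`, a later `dynamic_transform` call would no longer execute eagerly. A hedged sketch of the pattern outside the unittest scaffolding (class and argument names are illustrative, not from the PR):

```python
import numpy as np
import paddle


class TransformCheck:
    """Capture the eager reference before static mode is ever enabled."""

    def __init__(self, transform, img):
        # Eager-mode reference, computed while dygraph is still active.
        self.dy_res = transform(paddle.to_tensor(img)).numpy()

    def check_static(self, st_res):
        # st_res: numpy result produced after paddle.enable_static().
        np.testing.assert_almost_equal(self.dy_res, st_res)
```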