Skip to content

Commit 419188d

Browse files
danleifenggongweibao
authored and committed
[cherry-pick] add assertions on whether elementwise_div division is zero (#20713)
1 parent f8b8811 commit 419188d

File tree

3 files changed

+56
-9
lines changed

3 files changed

+56
-9
lines changed

paddle/fluid/operators/elementwise/elementwise_div_op.cc

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -32,19 +32,16 @@ struct SameDimsElemwiseDiv<
3232
}
3333
};
3434

35+
// use default div function for int32/int64 type because of divison zero
36+
// checking.
3537
template <typename T>
3638
struct SameDimsElemwiseDiv<
3739
platform::CPUDeviceContext, T,
3840
typename std::enable_if<!std::is_floating_point<T>::value>::type> {
3941
void operator()(const framework::ExecutionContext &ctx,
4042
const framework::Tensor *x, const framework::Tensor *y,
4143
framework::Tensor *z) {
42-
auto eigen_x = framework::EigenVector<T>::Flatten(*x);
43-
auto eigen_y = framework::EigenVector<T>::Flatten(*y);
44-
auto eigen_z = framework::EigenVector<T>::Flatten(*z);
45-
auto &place = *ctx.template device_context<platform::CPUDeviceContext>()
46-
.eigen_device();
47-
eigen_z.device(place) = eigen_x / eigen_y;
44+
default_elementwise_div<platform::CPUDeviceContext, T>(ctx, x, y, z);
4845
}
4946
};
5047

paddle/fluid/operators/elementwise/elementwise_op_function.cu.h

Lines changed: 33 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,11 @@
11
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
2+
23
Licensed under the Apache License, Version 2.0 (the "License");
34
you may not use this file except in compliance with the License.
45
You may obtain a copy of the License at
6+
57
http://www.apache.org/licenses/LICENSE-2.0
8+
69
Unless required by applicable law or agreed to in writing, software
710
distributed under the License is distributed on an "AS IS" BASIS,
811
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -12,9 +15,9 @@ limitations under the License. */
1215
#pragma once
1316

1417
#include <glog/logging.h>
18+
#include "paddle/fluid/platform/enforce.h"
1519
#include "paddle/fluid/platform/float16.h"
1620
#include "paddle/fluid/platform/hostdevice.h"
17-
1821
#define PADDLE_CUDA_THREAD_SIZE 512
1922

2023
#ifdef PADDLE_WITH_CUDA
@@ -29,11 +32,14 @@ limitations under the License. */
2932
#define __h2div h2div
3033
#endif
3134

35+
#define DIV_ERROR_INFO \
36+
"InvalidArgumentError: Integer division by zero encountered in " \
37+
"divide.Please check.\n"
3238
namespace paddle {
3339
namespace operators {
3440

3541
#define DEFINE_SIMPLE_BINARY_FUNCTOR(Func, expr) \
36-
template <typename T> \
42+
template <typename T, class Enable = void> \
3743
struct Func##Functor { \
3844
inline HOSTDEVICE T operator()(const T& a, const T& b) const { \
3945
return a expr b; \
@@ -46,8 +52,18 @@ DEFINE_SIMPLE_BINARY_FUNCTOR(Mul, *)
4652
DEFINE_SIMPLE_BINARY_FUNCTOR(Div, /)
4753
#undef DEFINE_SIMPLE_BINARY_FUNCTOR
4854

55+
// special div functor for int32/int64. check divison has a zero
56+
template <typename T>
57+
struct DivFunctor<T,
58+
typename std::enable_if<std::is_integral<T>::value>::type> {
59+
inline HOSTDEVICE T operator()(const T& a, const T& b) const {
60+
PADDLE_ENFORCE(b != 0, DIV_ERROR_INFO);
61+
return a / b;
62+
}
63+
};
64+
4965
#define DEFINE_SIMPLE_CUDA_BINARY_FUNCTOR(Func, expr) \
50-
template <typename T> \
66+
template <typename T, class Enable = void> \
5167
struct Func##RangeFunctor { \
5268
Func##RangeFunctor(const T* x, const T* y, T* z) : x_(x), y_(y), z_(z) {} \
5369
inline HOSTDEVICE void operator()(size_t id) const { \
@@ -63,6 +79,20 @@ DEFINE_SIMPLE_CUDA_BINARY_FUNCTOR(Mul, *)
6379
DEFINE_SIMPLE_CUDA_BINARY_FUNCTOR(Div, /)
6480
#undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTOR
6581

82+
// special div functor for int32/int64. check divison has a zero
83+
template <typename T>
84+
struct DivRangeFunctor<
85+
T, typename std::enable_if<std::is_integral<T>::value>::type> {
86+
DivRangeFunctor(const T* x, const T* y, T* z) : x_(x), y_(y), z_(z) {}
87+
inline HOSTDEVICE void operator()(size_t id) const {
88+
PADDLE_ENFORCE(y_[id] != 0, DIV_ERROR_INFO);
89+
z_[id] = x_[id] / y_[id];
90+
}
91+
const T* x_;
92+
const T* y_;
93+
T* z_;
94+
};
95+
6696
#ifdef PADDLE_CUDA_FP16
6797
inline DEVICE half2 half2_add(const half2& a, const half2& b) {
6898
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530

python/paddle/fluid/tests/unittests/test_elementwise_div_op.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,26 @@ def setUp(self):
151151
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
152152

153153

154+
class TestElementwiseDivOp_INT(OpTest):
    # Integer elementwise_div test (int32 by default). Divisors are drawn
    # from [1, 5) so they are never zero, and both operands are positive,
    # so Python's floor division (//) agrees with the op's integer division
    # for this input range.
    def setUp(self):
        self.op_type = "elementwise_div"
        self.dtype = np.int32
        # Hook for subclasses that want a different integer dtype.
        self.init_dtype()
        x = np.random.randint(1, 5, size=[2, 3]).astype(self.dtype)
        y = np.random.randint(1, 5, size=[2, 3]).astype(self.dtype)
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': x // y}

    def test_check_output(self):
        # Compare the operator's output against the numpy reference built
        # in setUp.
        self.check_output()

    def init_dtype(self):
        # Base case keeps the int32 default set in setUp.
        pass
172+
173+
154174
class TestElementwiseDivOpFp16(ElementwiseDivOp):
155175
def init_dtype(self):
156176
self.dtype = np.float16

0 commit comments

Comments
 (0)