Skip to content

Commit 0ebb3b5

Browse files
liym27 and Aurelius84
authored and committed
[cherry-pick]mv two function in conv op for good code style (#20116) test=release/1.6 (#20268)
* Delete PadFuntion, include padding.h instead. * move function(IsSymmetricPadding) from conv_cudnn_op.cu/conv_transpose_cudnn_op.cu to padding.h.
1 parent 29a88ad commit 0ebb3b5

File tree

3 files changed

+27
-65
lines changed

3 files changed

+27
-65
lines changed

paddle/fluid/operators/conv_cudnn_op.cu

+12-49
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ limitations under the License. */
2121
#include "paddle/fluid/operators/conv_cudnn_helper.h"
2222
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
2323
#include "paddle/fluid/operators/conv_op.h"
24+
#include "paddle/fluid/operators/math/padding.h"
2425
#include "paddle/fluid/platform/cudnn_helper.h"
2526
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
2627
#include "paddle/fluid/platform/float16.h"
@@ -59,44 +60,6 @@ static inline void GetNCDHW(const framework::DDim& dims,
5960
}
6061
}
6162

62-
static inline bool IsSymmetricPadding(const std::vector<int>& paddings,
63-
const int data_dim) {
64-
bool is_sys_pad = true;
65-
if (paddings.size() == data_dim * 2) {
66-
for (size_t i = 0; i < data_dim; ++i) {
67-
if (paddings[2 * i] != paddings[2 * i + 1]) {
68-
is_sys_pad = false;
69-
return is_sys_pad;
70-
}
71-
}
72-
}
73-
return is_sys_pad;
74-
}
75-
76-
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
77-
typename IndexType = Eigen::DenseIndex>
78-
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
79-
80-
template <typename DeviceContext, typename T, size_t D>
81-
static void PadFunction(const framework::ExecutionContext& context,
82-
const std::vector<int>& pads,
83-
const framework::Tensor& src, T pad_value,
84-
framework::Tensor* out) {
85-
Eigen::array<std::pair<int, int>, D> paddings;
86-
87-
for (size_t i = 0; i < paddings.size(); ++i) {
88-
paddings[i].first = pads[i * 2];
89-
paddings[i].second = pads[i * 2 + 1];
90-
}
91-
92-
auto src_tensor = EigenTensor<T, D>::From(src);
93-
auto out_tensor = EigenTensor<T, D>::From(*out);
94-
95-
auto& place =
96-
*context.template device_context<DeviceContext>().eigen_device();
97-
out_tensor.device(place) = src_tensor.pad(paddings, pad_value);
98-
}
99-
10063
template <typename DeviceContext, typename T, size_t D>
10164
static void Slice_2(const framework::ExecutionContext& context,
10265
const Tensor* input, Tensor* out,
@@ -192,7 +155,7 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
192155
in_data_dims, strides, ksize);
193156

194157
int data_dim = strides.size(); // 2d or 3d
195-
bool is_sys_pad = IsSymmetricPadding(paddings, data_dim);
158+
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
196159

197160
Tensor transformed_input;
198161
std::vector<int> padding_common(data_dim, 0);
@@ -225,12 +188,12 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
225188
T pad_value(0.0);
226189
switch (rank) {
227190
case 4: {
228-
PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
191+
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
229192
ctx, input_pad, transformed_input_channel, pad_value,
230193
&transformed_input);
231194
} break;
232195
case 5: {
233-
PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
196+
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
234197
ctx, input_pad, transformed_input_channel, pad_value,
235198
&transformed_input);
236199
} break;
@@ -404,7 +367,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
404367
// cuDNN only supports padding the same amount on every dimension.
405368
// So we create a new padded input tensor.
406369
int data_dim = strides.size(); // 2d or 3d
407-
bool is_sys_pad = IsSymmetricPadding(paddings, data_dim);
370+
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
408371
Tensor transformed_input(input->type());
409372
Tensor transformed_input_grad(input->type());
410373
std::vector<int> padding_common(data_dim, 0);
@@ -446,12 +409,12 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
446409
T pad_value(0.0);
447410
switch (rank) {
448411
case 4: {
449-
PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
412+
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
450413
ctx, input_pad, transformed_input_channel, pad_value,
451414
&transformed_input);
452415
} break;
453416
case 5: {
454-
PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
417+
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
455418
ctx, input_pad, transformed_input_channel, pad_value,
456419
&transformed_input);
457420
} break;
@@ -737,7 +700,7 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
737700
in_data_dims, strides, ksize);
738701

739702
int data_dim = strides.size(); // 2d or 3d
740-
bool is_sys_pad = IsSymmetricPadding(paddings, data_dim);
703+
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
741704
Tensor transformed_X(X->type());
742705
Tensor transformed_ddX(X->type());
743706

@@ -786,16 +749,16 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
786749
T pad_value(0.0);
787750
switch (rank) {
788751
case 4: {
789-
PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
752+
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
790753
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
791-
PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
754+
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
792755
ctx, input_pad, transformed_ddX_channel, pad_value,
793756
&transformed_ddX);
794757
} break;
795758
case 5: {
796-
PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
759+
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
797760
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
798-
PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
761+
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
799762
ctx, input_pad, transformed_ddX_channel, pad_value,
800763
&transformed_ddX);
801764
} break;

paddle/fluid/operators/conv_transpose_cudnn_op.cu

+2-16
Original file line numberDiff line numberDiff line change
@@ -51,20 +51,6 @@ static void DataTranspose(const framework::ExecutionContext& ctx,
5151
transpose(dev_ctx, *input, output, axis);
5252
}
5353

54-
static inline bool IsSymmetricPadding(const std::vector<int>& paddings,
55-
const int data_dim) {
56-
bool is_sys_pad = true;
57-
if (paddings.size() == data_dim * 2) {
58-
for (size_t i = 0; i < data_dim; ++i) {
59-
if (paddings[2 * i] != paddings[2 * i + 1]) {
60-
is_sys_pad = false;
61-
return is_sys_pad;
62-
}
63-
}
64-
}
65-
return is_sys_pad;
66-
}
67-
6854
template <typename T>
6955
class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
7056
public:
@@ -124,7 +110,7 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
124110
in_data_dims, strides, ksize);
125111

126112
int data_dim = strides.size(); // 2d or 3d
127-
bool is_sys_pad = IsSymmetricPadding(paddings, data_dim);
113+
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
128114

129115
std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
130116
Tensor transformed_input;
@@ -373,7 +359,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
373359
in_data_dims, strides, ksize);
374360

375361
int data_dim = strides.size(); // 2d or 3d
376-
bool is_sys_pad = IsSymmetricPadding(paddings, data_dim);
362+
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
377363

378364
std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
379365
Tensor transformed_output_grad;

paddle/fluid/operators/math/padding.h

+13
Original file line numberDiff line numberDiff line change
@@ -119,6 +119,19 @@ void PaddingGradFunctor(int rank, const framework::ExecutionContext& context,
119119
}
120120
}
121121

122+
inline bool IsSymmetricPadding(const std::vector<int>& pads,
123+
const int data_dim) {
124+
bool is_sys_pad = true;
125+
if (pads.size() == data_dim * 2) {
126+
for (size_t i = 0; i < data_dim; ++i) {
127+
if (pads[2 * i] != pads[2 * i + 1]) {
128+
is_sys_pad = false;
129+
return is_sys_pad;
130+
}
131+
}
132+
}
133+
return is_sys_pad;
134+
}
122135
} // namespace math
123136
} // namespace operators
124137
} // namespace paddle

0 commit comments

Comments
 (0)