Skip to content

Commit 5ada98b

Browse files
authored
[clang-tidy] NO.6 enable modernize-avoid-c-arrays step: 2 (#55954)
1 parent ae88111 commit 5ada98b

File tree

74 files changed

+429
-319
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

74 files changed

+429
-319
lines changed

.clang-tidy

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,7 @@ bugprone-unused-raii,
166166
-misc-unused-alias-decls,
167167
-misc-unused-using-decls,
168168
modernize-avoid-bind,
169-
-modernize-avoid-c-arrays,
169+
modernize-avoid-c-arrays,
170170
-modernize-deprecated-headers,
171171
-modernize-deprecated-ios-base-aliases,
172172
modernize-loop-convert,

paddle/fluid/framework/data_type.cc

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,13 +150,15 @@ proto::VarType::Type PromoteTypesIfComplexExists(
150150
// Here is a complete rules table, but some rules are not used.
151151
// It is still written this way because array accessing is still
152152
// more efficient than if-else
153+
// NOLINTBEGIN(*-avoid-c-arrays)
153154
static constexpr proto::VarType::Type promote_types_table[4][4] = {
154155
/* f4 f8 c4 c8*/
155156
/* f4 */ {f4, f8, c4, c8},
156157
/* f8 */ {f8, f8, c8, c8},
157158
/* c4 */ {c4, c8, c4, c8},
158159
/* c8 */ {c8, c8, c8, c8},
159160
};
161+
// NOLINTEND(*-avoid-c-arrays)
160162

161163
return promote_types_table[type_an][type_bn];
162164
}

paddle/fluid/framework/details/fused_all_reduce_op_handle.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
#include "paddle/phi/backends/device_memory_aligment.h"
2222
#include "paddle/phi/core/flags.h"
2323

24-
DEFINE_bool(skip_fused_all_reduce_check, false, "");
24+
DEFINE_bool(skip_fused_all_reduce_check, false, ""); // NOLINT
2525
PHI_DECLARE_bool(allreduce_record_one_event);
2626

2727
namespace paddle {

paddle/fluid/framework/device_worker.cc

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ limitations under the License. */
1414

1515
#include "paddle/fluid/framework/device_worker.h"
1616

17+
#include <array>
1718
#include <chrono>
1819
#include "paddle/fluid/framework/convert_utils.h"
1920
namespace phi {
@@ -90,7 +91,7 @@ void PrintLodTensorType<float>(phi::DenseTensor* tensor,
9091
std::string& out_val, // NOLINT
9192
char separator,
9293
bool need_leading_separator) {
93-
char buf[MAX_FLOAT_BUFF_SIZE];
94+
std::array<char, MAX_FLOAT_BUFF_SIZE> buf;
9495
auto count = tensor->numel();
9596
if (start < 0 || end > count) {
9697
VLOG(3) << "access violation";
@@ -104,8 +105,8 @@ void PrintLodTensorType<float>(phi::DenseTensor* tensor,
104105
tensor->data<float>()[i] < FLOAT_EPS) {
105106
out_val += "0";
106107
} else {
107-
sprintf(buf, "%.9f", tensor->data<float>()[i]); // NOLINT
108-
out_val += buf;
108+
sprintf(buf.data(), "%.9f", tensor->data<float>()[i]); // NOLINT
109+
out_val += buf.data();
109110
}
110111
}
111112
}

paddle/fluid/framework/hogwild_worker.cc

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
See the License for the specific language governing permissions and
1313
limitations under the License. */
1414

15+
#include <array>
1516
#include <ctime>
1617

1718
#include "paddle/fluid/framework/barrier.h"
@@ -425,12 +426,14 @@ void HogwildWorker::PrintFetchVars() {
425426
if (thread_id_ == 0 && batch_num_ % batch_per_print == 0) {
426427
time_t curtime;
427428
time(&curtime);
428-
char mbstr[80];
429-
std::strftime(
430-
mbstr, sizeof(mbstr), "%Y-%m-%d %H:%M:%S", std::localtime(&curtime));
429+
std::array<char, 80> mbstr;
430+
std::strftime(mbstr.data(),
431+
sizeof(mbstr),
432+
"%Y-%m-%d %H:%M:%S",
433+
std::localtime(&curtime));
431434

432435
std::stringstream ss;
433-
ss << "time: [" << mbstr << "], ";
436+
ss << "time: [" << mbstr.data() << "], ";
434437
ss << "batch: [" << batch_num_ << "], ";
435438

436439
for (int i = 0; i < fetch_var_num; ++i) {

paddle/fluid/framework/io/shell.cc

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
// See the License for the specific language governing permissions and
1313
// limitations under the License.
1414

15+
#include <array>
1516
#define GLOG_NO_ABBREVIATED_SEVERITIES // msvc conflict logging with windows.h
1617
#include "paddle/fluid/framework/io/shell.h"
1718

@@ -150,14 +151,14 @@ static int shell_popen_fork_internal(const char* real_cmd,
150151
}
151152

152153
static int read_from_pipe(FILE* fp, std::string* output) {
153-
char buf[4096];
154+
std::array<char, 4096> buf;
154155
while (1) {
155-
int n = fread(buf, 1, 4096, fp);
156+
int n = fread(buf.data(), 1, 4096, fp);
156157
if (n <= 0) {
157158
break;
158159
}
159160

160-
output->append(buf, n);
161+
output->append(buf.data(), n);
161162
}
162163

163164
if (!feof(fp)) {
@@ -249,8 +250,8 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd,
249250
}
250251

251252
static int shell_p2open_fork_internal(const char* real_cmd,
252-
int pipein_fds[2],
253-
int pipeout_fds[2]) {
253+
int pipein_fds[2], // NOLINT
254+
int pipeout_fds[2]) { // NOLINT
254255
#if defined(_WIN32) || defined(__APPLE__) || defined(PADDLE_ARM)
255256
return 0;
256257
#else

paddle/fluid/framework/ir/lock_free_optimize_pass.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,10 +25,10 @@ namespace paddle {
2525
namespace framework {
2626
namespace ir {
2727

28-
const char kSumGradOpName[] = "sum";
28+
const char kSumGradOpName[] = "sum"; // NOLINT
2929
// TODO(minqiyang): only support sgd at current time, please add
3030
// other optimizers later.
31-
const char kOptimizerType[] = "sgd";
31+
const char kOptimizerType[] = "sgd"; // NOLINT
3232

3333
void LockFreeOptimizePass::ApplyImpl(ir::Graph* graph) const {
3434
PADDLE_ENFORCE_NOT_NULL(

paddle/fluid/framework/ir/multi_batch_merge_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ namespace paddle {
2323
namespace framework {
2424
namespace ir {
2525

26-
static const char kNumRepeats[] = "num_repeats";
26+
static const char kNumRepeats[] = "num_repeats"; // NOLINT
2727
typedef std::unordered_map<std::string, std::vector<ir::Node*>> SSAVarList;
2828

2929
ir::Node* SameNameVar(std::unordered_set<ir::Node*> all, ir::Node* target) {

paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ namespace {
5050
// all operators. NOTE that even we use a vector here, the operators is
5151
// unordered.
5252
typedef std::vector<details::OpHandleBase *> GraphOps;
53-
const char kGraphOps[] = "ops";
53+
const char kGraphOps[] = "ops"; // NOLINT
5454

5555
bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) {
5656
return PADDLE_GET_CONST(

paddle/fluid/framework/ir/node.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ namespace ir {
2323
#if !defined(_WIN32) && (__cplusplus < 201703L)
2424
constexpr char Node::kControlDepVarName[];
2525
#else
26-
const char Node::kControlDepVarName[] = "__control_var";
26+
const char Node::kControlDepVarName[] = "__control_var"; // NOLINT
2727
#endif
2828

2929
std::unique_ptr<Node> CreateNodeForTest(const std::string &name,

paddle/fluid/framework/ir/pass.cc

100755100644
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ namespace paddle {
3636
namespace framework {
3737
namespace ir {
3838

39-
static const char kParamScopeAttr[] = "__param_scope__";
39+
static const char kParamScopeAttr[] = "__param_scope__"; // NOLINT
4040

4141
static const std::vector<std::string> support_subgraph_passes = {
4242
"simplify_with_basic_ops_pass",

paddle/fluid/framework/ir/runtime_context_cache_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ namespace framework {
2222
namespace ir {
2323

2424
void RuntimeContextCachePass::ApplyImpl(ir::Graph* graph) const {
25-
static constexpr char kNotAllowInferShapeCahce[] =
25+
static constexpr char kNotAllowInferShapeCahce[] = // NOLINT
2626
"@NOT_ALLOW_INFERSHAPE_CACHE@";
2727
VLOG(3) << "Applies Runtime Context Cache strategy.";
2828
for (const Node* n : graph->Nodes()) {

paddle/fluid/framework/operator.cc

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1227,7 +1227,7 @@ bool OpSupportGPU(const std::string& op_type) {
12271227
}
12281228

12291229
struct OperatorWithKernel::CacheImpl {
1230-
static const char kNotAllowInferShapeCahce[];
1230+
static const char kNotAllowInferShapeCahce[]; // NOLINT
12311231
explicit CacheImpl(phi::KernelContext* kernel_ctx,
12321232
RuntimeInferShapeContext* infer_shape_ctx,
12331233
const std::vector<phi::DenseTensor*>& tensors,
@@ -1273,8 +1273,9 @@ struct OperatorWithKernel::CacheImpl {
12731273
bool not_allow_infer_shape_cache_;
12741274
std::vector<phi::DDim> last_ddims_;
12751275
};
1276-
const char OperatorWithKernel::CacheImpl::kNotAllowInferShapeCahce[] =
1277-
"@NOT_ALLOW_INFERSHAPE_CACHE@";
1276+
const char // NOLINT
1277+
OperatorWithKernel::CacheImpl::kNotAllowInferShapeCahce[] =
1278+
"@NOT_ALLOW_INFERSHAPE_CACHE@";
12781279

12791280
static void CheckTensorNANOrInf(const std::string& op_type,
12801281
const std::string& name,

paddle/fluid/framework/prune.cc

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,12 +23,12 @@ limitations under the License. */
2323
namespace paddle {
2424
namespace framework {
2525

26-
const char kFeedOpType[] = "feed";
27-
const char kFetchOpType[] = "fetch";
26+
const char kFeedOpType[] = "feed"; // NOLINT
27+
const char kFetchOpType[] = "fetch"; // NOLINT
2828

29-
const char kRecurrent[] = "recurrent";
30-
const char kStates[] = "states";
31-
const char kExStates[] = "ex_states";
29+
const char kRecurrent[] = "recurrent"; // NOLINT
30+
const char kStates[] = "states"; // NOLINT
31+
const char kExStates[] = "ex_states"; // NOLINT
3232

3333
bool HasDependentInputVar(
3434
const proto::OpDesc& op_desc,

paddle/fluid/framework/tensor_util.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -531,8 +531,8 @@ void TensorToStream(std::ostream& os,
531531
#endif
532532
} else if (platform::is_custom_place(tensor.place())) {
533533
#ifdef PADDLE_WITH_CUSTOM_DEVICE
534-
constexpr size_t kBufSize = 1024 * 1024 * 64; // 64MB
535-
std::unique_ptr<char[]> buf(new char[kBufSize]);
534+
constexpr size_t kBufSize = 1024 * 1024 * 64; // 64MB
535+
std::unique_ptr<char[]> buf(new char[kBufSize]); // NOLINT
536536
auto& custom_device_context =
537537
static_cast<const platform::CustomDeviceContext&>(dev_ctx);
538538
platform::CPUPlace cpu;
@@ -598,7 +598,7 @@ void TensorFromStream(std::istream& is,
598598
// proto buffer
599599
int32_t size;
600600
is.read(reinterpret_cast<char*>(&size), sizeof(size));
601-
std::unique_ptr<char[]> buf(new char[size]);
601+
std::unique_ptr<char[]> buf(new char[size]); // NOLINT
602602
is.read(reinterpret_cast<char*>(buf.get()), size);
603603
PADDLE_ENFORCE_EQ(
604604
desc.ParseFromArray(buf.get(), size),
@@ -671,7 +671,7 @@ void TensorFromStream(std::istream& is,
671671
0,
672672
platform::errors::InvalidArgument(
673673
"phi::DenseTensor desc size should >= 0"));
674-
std::unique_ptr<char[]> buf(new char[size]);
674+
std::unique_ptr<char[]> buf(new char[size]); // NOLINT
675675
is.read(reinterpret_cast<char*>(buf.get()), size);
676676
PADDLE_ENFORCE_EQ(
677677
desc.ParseFromArray(buf.get(), size),

paddle/fluid/framework/tensor_util_test.cc

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
// limitations under the License.
1414

1515
#include <gtest/gtest.h>
16+
#include <array>
1617
#include <cmath>
1718

1819
#include "paddle/fluid/framework/tensor_util.h"
@@ -28,8 +29,8 @@ TEST(TensorCopy, Tensor) {
2829
int* src_ptr = src_tensor.mutable_data<int>(phi::make_ddim({3, 3}),
2930
platform::CPUPlace());
3031

31-
int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
32-
memcpy(src_ptr, arr, 9 * sizeof(int));
32+
std::array<int, 9> arr = {1, 2, 3, 4, 5, 6, 7, 8, 9};
33+
memcpy(src_ptr, arr.data(), 9 * sizeof(int));
3334
src_tensor.set_layout(DataLayout::kAnyLayout);
3435

3536
auto cpu_place = new platform::CPUPlace();
@@ -467,7 +468,7 @@ TEST(TensorIsfinite, CPU) {
467468

468469
TEST(Tensor, FromAndToStream) {
469470
phi::DenseTensor src_tensor;
470-
int array[6] = {1, 2, 3, 4, 5, 6};
471+
std::array<int, 6> array = {1, 2, 3, 4, 5, 6};
471472
src_tensor.Resize({2, 3});
472473
int* src_ptr = src_tensor.mutable_data<int>(platform::CPUPlace());
473474
for (int i = 0; i < 6; ++i) {

paddle/fluid/inference/analysis/passes/ir_params_sync_among_devices_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
#include "paddle/fluid/platform/place.h"
2828
#include "paddle/phi/core/dense_tensor.h"
2929

30-
DEFINE_bool(
30+
DEFINE_bool( // NOLINT
3131
custom_model_save_cpu,
3232
false,
3333
"Keep old mode for developers, the model is saved on cpu not device.");

paddle/fluid/inference/api/api_impl.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ limitations under the License. */
2626
#include "paddle/fluid/platform/place.h"
2727
#include "paddle/fluid/platform/profiler.h"
2828

29-
DEFINE_bool(profile, false, "Turn on profiler for fluid");
29+
DEFINE_bool(profile, false, "Turn on profiler for fluid"); // NOLINT
3030

3131
namespace paddle {
3232
namespace {

paddle/fluid/inference/io.cc

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,9 @@ limitations under the License. */
2929
// phi
3030
#include "paddle/phi/kernels/declarations.h"
3131

32-
DEFINE_string(devices, "", "The devices to be used which is joined by comma.");
32+
DEFINE_string(devices, // NOLINT
33+
"",
34+
"The devices to be used which is joined by comma.");
3335
DEFINE_int32(math_num_threads,
3436
1,
3537
"Number of threads used to run math functions.");

paddle/fluid/ir/dialect/kernel_op.cc

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,10 @@
2020
namespace paddle {
2121
namespace dialect {
2222

23-
const char* PhiKernelOp::attributes_name[attributes_num] = {
24-
"op_name", "kernel_name", "kernel_key"};
23+
const char* PhiKernelOp::attributes_name[attributes_num] = { // NOLINT
24+
"op_name",
25+
"kernel_name",
26+
"kernel_key"};
2527

2628
void PhiKernelOp::Verify() {
2729
VLOG(4) << "Verifying inputs, outputs and attributes for: PhiKernelOp.";

paddle/fluid/ir_adaptor/translator/op_translator.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,8 @@ using InputHandlerFn = std::function<ir::OpResult(ir::IrContext*,
6969
ir::Program*)>;
7070
using AttributeHandlerFn = std::function<ir::Attribute(
7171
ir::IrContext*, const OpDesc&, const OpAttributeInfo&)>;
72-
constexpr char kTargetDialectPrefix[] = "pd.";
73-
constexpr char kEmptyVarName[] = "@EMPTY@";
72+
constexpr char kTargetDialectPrefix[] = "pd."; // NOLINT
73+
constexpr char kEmptyVarName[] = "@EMPTY@"; // NOLINT
7474

7575
static const std::unordered_set<std::string> special_non_inplace_ops = {};
7676

0 commit comments

Comments (0)