Commit 44e0393

bump black to 2023 style (#54523)
1 parent e73ddd6 commit 44e0393

452 files changed (+119, -774 lines)

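Most of the 774 deleted lines fall under a single rule in black's 2023 stable style: a blank line sitting directly after a block opener (a def or class signature, an if, a for, a with) is now removed. A minimal before/after sketch follows; the code is illustrative only, not taken from the commit.

# Accepted by black 22.8.0:
#
#     def generate_api(path):
#
#         return path
#
# black 23.3.0 deletes the blank line under the signature:
def generate_api(path):
    return path

The few remaining changes are two smaller 2023-style rewrites, shown where they first occur below: unwrapping parenthesized tuple targets in for loops, and dropping redundant parentheses around a single context manager.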

.cmake-format.py

Lines changed: 0 additions & 1 deletion

@@ -16,7 +16,6 @@
 # Options affecting formatting.
 # -----------------------------
 with section("format"):
-
     # How wide to allow formatted cmake files
     line_width = 80

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion

@@ -53,7 +53,7 @@ repos:
         )$
   # For Python files
   - repo: https://github.com/psf/black.git
-    rev: 22.8.0
+    rev: 23.3.0
     hooks:
       - id: black
         files: (.*\.(py|pyi|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
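Bumping rev is the only change pre-commit needs here: on its next run it installs black 23.3.0 into the cached hook environment and applies the 2023 style to everything the files pattern matches. Assuming the hook id above, the whole tree can be reformatted in one pass with, for example:

    pre-commit run black --all-files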

paddle/fluid/eager/auto_code_generator/generator/eager_gen.py

Lines changed: 0 additions & 4 deletions

@@ -547,7 +547,6 @@ def GenerateCoreOpInfoDeclaration():


 def GenerateCoreOpInfoDefinition():
-
     op_args_info_list = []
     for op_name, arg_list in core_ops_args_info.items():
         arg_str = ",".join(["\"" + v + "\"" for v in arg_list])
@@ -803,7 +802,6 @@ def CollectBackwardInfo(self):
         self.backward_returns_list = backward_returns_list_new

     def CollectForwardInfoFromBackwardContents(self):
-
         backward_forward_str = self.backward_forward_str

         (
@@ -1910,7 +1908,6 @@ def GenerateHigherOrderNodeCreationCode(self):
             self.grad_api_contents["backward_op"] in prim_white_list
             or is_invoke_forward_api
         ):
-
             next_grad_node_creation_str = f"""
  if (!paddle::prim::PrimCommonUtils::IsEagerPrimEnabled()) {{
    if(trace_backward) {{
@@ -2274,7 +2271,6 @@ def GenerateNodeDefinition(
   egr::EagerUtils::HandleViewBetweenInputAndOutput({inplace_grad_input_str}, api_output_{out_index});
 }}"""
         if IsPlainTensorType(ttype):
-
             if (
                 backward_inplace_map
                 and name in backward_inplace_map.values()

paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py

Lines changed: 0 additions & 1 deletion

@@ -604,7 +604,6 @@ def GenerateCoreOpsInfoMap():


 def GeneratePythonCWrappers(python_c_function_str, python_c_function_reg_str):
-
     (
         core_ops_infos_definition,
         core_ops_infos_registry,

paddle/fluid/operators/generator/generate_op.py

Lines changed: 1 addition & 1 deletion

@@ -500,7 +500,7 @@ def parse_get_expected_kerneltype(
     fw_name = op_comp_map['op'].split('(')[0].strip()
     # deal the last underline of function name in op_comp_map['get_expected_kernel_type']
     new_get_expected_kernel_type_func_map = {}
-    for (key, value) in op_comp_map['get_expected_kernel_type'].items():
+    for key, value in op_comp_map['get_expected_kernel_type'].items():
         new_get_expected_kernel_type_func_map[
             delete_last_underline(key)
         ] = value
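This hunk shows the second 2023-style rewrite: black now unwraps parentheses around tuple targets in for statements, since for (key, value) in d.items(): and for key, value in d.items(): are the same statement to the parser. A small sketch with hypothetical sample data, not from this file:

op_map = {"conv2d_": 1, "matmul_": 2}  # hypothetical sample data

# black 22.8.0 left this alone: for (key, value) in op_map.items():
# black 23.3.0 rewrites it to the bare tuple target:
for key, value in op_map.items():
    print(key, value)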

paddle/fluid/operators/generator/parse_utils.py

Lines changed: 3 additions & 3 deletions

@@ -615,15 +615,15 @@ def cross_validate(ops):
         assert len(fw_call["inputs"]) <= len(
             fw_op["inputs"]
         ), f"{name}: forward call has more inputs than the op "
-        for (input, input_) in zip(fw_call["inputs"], fw_op["inputs"]):
+        for input, input_ in zip(fw_call["inputs"], fw_op["inputs"]):
             assert (
                 input["typename"] == input_["typename"]
             ), f"type mismatch in {name} and {fw_name}"

         assert len(fw_call["attrs"]) <= len(
             fw_op["attrs"]
         ), f"{name}: forward call has more attrs than the op "
-        for (attr, attr_) in zip(fw_call["attrs"], fw_op["attrs"]):
+        for attr, attr_ in zip(fw_call["attrs"], fw_op["attrs"]):
             if attr["typename"] == "Scalar":
                 # special case for Scalar, fw_call can omit the type
                 assert re.match(
@@ -637,7 +637,7 @@ def cross_validate(ops):
         assert len(fw_call["outputs"]) == len(
             fw_op["outputs"]
         ), f"{name}: forward call has more outputs than the op "
-        for (output, output_) in zip(
+        for output, output_ in zip(
             fw_call["outputs"], fw_op["outputs"]
         ):
             assert (

paddle/phi/api/yaml/generator/backward_api_gen.py

Lines changed: 0 additions & 1 deletion

@@ -316,7 +316,6 @@ def generate_backward_api(
     header_file_path,
     source_file_path,
 ):
-
     bw_apis = []
     for each_api_yaml in backward_yaml_path:
         with open(each_api_yaml, 'r') as f:

paddle/phi/api/yaml/generator/intermediate_api_gen.py

Lines changed: 0 additions & 1 deletion

@@ -92,7 +92,6 @@ def generate_intermediate_api(
     dygraph_header_file_path,
     dygraph_source_file_path,
 ):
-
     dygraph_header_file = open(dygraph_header_file_path, 'w')
     dygraph_source_file = open(dygraph_source_file_path, 'w')

paddle/phi/api/yaml/generator/sparse_api_gen.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -351,7 +351,6 @@ def api_namespace():
351351

352352

353353
def generate_api(api_yaml_path, header_file_path, source_file_path):
354-
355354
with open(api_yaml_path, 'r') as f:
356355
apis = yaml.load(f, Loader=yaml.FullLoader)
357356
header_file = open(header_file_path, 'w')

paddle/phi/api/yaml/generator/sparse_bw_api_gen.py

Lines changed: 0 additions & 1 deletion

@@ -158,7 +158,6 @@ def api_namespace():


 def generate_api(api_yaml_path, header_file_path, source_file_path):
-
     with open(api_yaml_path, 'r') as f:
         apis = yaml.load(f, Loader=yaml.FullLoader)
     header_file = open(header_file_path, 'w')

paddle/phi/api/yaml/generator/strings_api_gen.py

Lines changed: 0 additions & 1 deletion

@@ -362,7 +362,6 @@ def api_namespace():


 def generate_api(api_yaml_path, header_file_path, source_file_path):
-
     with open(api_yaml_path, 'r') as f:
         apis = yaml.load(f, Loader=yaml.FullLoader)
     header_file = open(header_file_path, 'w')

paddle/phi/kernels/sparse/gpu/cutlass_generator/gather_gemm_scatter_generator.py

Lines changed: 0 additions & 8 deletions

@@ -68,7 +68,6 @@ def CreateGatherGemmScatterOperator(
     for tile_description in tile_descriptions:
         for alignment in alignment_constraints:
             for complex_transform in complex_transforms:
-
                 alignment_c = min(8, alignment)

                 A = TensorDescription(
@@ -98,7 +97,6 @@ def CreateGatherGemmScatterOperator(


 def GenerateSM80_TensorOp_16816(manifest, cuda_version, debug=False):
-
     if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
         return

@@ -211,7 +209,6 @@ def GenerateSM80_TensorOp_16816(manifest, cuda_version, debug=False):

     # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
     if math_inst.element_a != math_inst.element_accumulator:
-
         data_type_mixed = [
             math_inst.element_a,
             math_inst.element_b,
@@ -225,7 +222,6 @@ def GenerateSM80_TensorOp_16816(manifest, cuda_version, debug=False):


 def GenerateSM80_TensorOp_1688(manifest, cuda_version, debug=False):
-
     if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
         return

@@ -341,7 +337,6 @@ def GenerateSM80_TensorOp_1688(manifest, cuda_version, debug=False):


 def GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version, debug=False):
-
     if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
         return

@@ -443,7 +438,6 @@ def GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version, debug=False):
 def GenerateSM80_TensorOp_1688_fast_fp32_math(
     manifest, cuda_version, debug=False
 ):
-
     if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
         return

@@ -525,7 +519,6 @@ def GenerateSM80_TensorOp_1688_fast_fp32_math(


 def GenerateSM75_TensorOp_1688(manifest, cuda_version, debug=False):
-
     if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
         return

@@ -649,7 +642,6 @@ def __init__(


 if __name__ == "__main__":
-
     args = KernelCfg(
         architectures='80',
         build_dir=sys.argv[2],

paddle/phi/kernels/sparse/gpu/cutlass_generator/gather_gemm_scatter_manifest.py

Lines changed: 0 additions & 1 deletion

@@ -156,7 +156,6 @@ def __exit__(self, exception_type, exception_value, traceback):

 class GatherGemmScatterManifest(Manifest):
     def emit(self, target=GeneratorTarget.Library):
-
         operation_emitters = {
             GeneratorTarget.Library: GatherGemmScatterEmitOperationKindLibrary
         }

paddle/phi/kernels/sparse/gpu/cutlass_generator/gather_gemm_scatter_operation.py

Lines changed: 0 additions & 5 deletions

@@ -89,7 +89,6 @@ def instance_template(self):
         return ""

     def emit(self, operation):
-
         threadblock_shape = operation.tile_description.threadblock_shape
         warp_count = operation.tile_description.warp_count

@@ -107,7 +106,6 @@ def emit(self, operation):
             and operation.B.layout in transpose_layouts.keys()
             and operation.C.layout in transpose_layouts.keys()
         ):
-
             instance_layout_A = transpose_layouts[operation.A.layout]
             instance_layout_B = transpose_layouts[operation.B.layout]
             instance_layout_C = transpose_layouts[operation.C.layout]
@@ -124,7 +122,6 @@ def emit(self, operation):

         # Support built-in epilogue functors or user-defined functions
         if isinstance(operation.epilogue_functor, enum.Enum):
-
             epilogue_vector_length = (
                 min(
                     operation.C.alignment * DataTypeSize[operation.C.element],
@@ -256,7 +253,6 @@ def __enter__(self):
         return self

     def __exit__(self, exception_type, exception_value, traceback):
-
         # Write instance definitions in top-level namespace
         for instance_definition in self.instance_definitions:
             self.configuration_file.write(instance_definition)
@@ -278,7 +274,6 @@ def __init__(
         epilogue_functor=EpilogueFunctor.LinearCombination,
         swizzling_functor=SwizzlingFunctor.Identity8,
     ):
-
         super().__init__(
             gemm_kind,
             arch,

python/paddle/amp/accuracy_compare.py

Lines changed: 0 additions & 1 deletion

@@ -458,7 +458,6 @@ def _write_titles(self, worksheet, loss_scale, row):
     def add_worksheet(
         self, mp_tensor_info_list, sheetname, loss_scale, skip_normal_tensors
     ):
-
        assert self.workbook is not None

        worksheet = self.workbook.add_worksheet(sheetname)

python/paddle/amp/debugging.py

Lines changed: 0 additions & 1 deletion

@@ -137,7 +137,6 @@ def __init__(
         debug_step=None,
         stack_height_limit=1,
     ):
-
         self.enable = enable
         self.debug_mode = debug_mode
         self.output_dir = output_dir

python/paddle/amp/grad_scaler.py

Lines changed: 0 additions & 1 deletion

@@ -98,7 +98,6 @@ def __init__(
         decr_every_n_nan_or_inf=1,
         use_dynamic_loss_scaling=True,
     ):
-
         tracer = _dygraph_tracer()
         if not tracer:
             raise ValueError(

python/paddle/cost_model/cost_model.py

Lines changed: 0 additions & 1 deletion

@@ -52,7 +52,6 @@ def profile_measure(
     device='gpu',
     fetch_cost_list=['time'],
 ):
-
     place = paddle.set_device('gpu')
     x = np.random.random(size=(10, 1)).astype('float32')
     exe = paddle.static.Executor(place)

python/paddle/dataset/conll05.py

Lines changed: 0 additions & 1 deletion

@@ -151,7 +151,6 @@ def reader_creator(
 ):
     def reader():
         for sentence, predicate, labels in corpus_reader():
-
             sen_len = len(sentence)

             verb_index = labels.index('B-V')

python/paddle/dataset/voc2012.py

Lines changed: 0 additions & 1 deletion

@@ -42,7 +42,6 @@


 def reader_creator(filename, sub_name):
-
     tarobject = tarfile.open(filename)
     name2mem = {}
     for ele in tarobject.getmembers():

python/paddle/distributed/auto_parallel/random.py

Lines changed: 0 additions & 2 deletions

@@ -69,7 +69,6 @@ def parallel_manual_seed(seed):


 def determinate_rng(rank, dims_mapping, process_mesh):
-
     # TODO(JZ-LIANG) Support Mesh with any high rank
     # use a string to unique integer hashing algorithm for seed computation.
     # instead of using offsets to coodinate seed across devices.
@@ -119,7 +118,6 @@ def determinate_rng(rank, dims_mapping, process_mesh):


 def init_auto_parallel_rng():
-
     if not is_enable_auto_rand_ctrl():
         return

python/paddle/distributed/auto_parallel/static/auto_align_tool.py

Lines changed: 2 additions & 2 deletions

@@ -319,7 +319,7 @@ def load(save_dir):
             assert os.path.isfile(filepath)
             if "vars" in filename:
                 assert filename.endswith("pkl")
-                with (open(filepath, "rb")) as f:
+                with open(filepath, "rb") as f:
                     vars_list.append(pickle.load(f))
             elif "program" in filename:
                 assert filename.endswith("pdmodel")
@@ -328,7 +328,7 @@ def load(save_dir):
                 program_list.append(deserialize_program(program_string))
             elif "dist_attr" in filename:
                 assert filename.endswith("pkl")
-                with (open(filepath, "rb")) as f:
+                with open(filepath, "rb") as f:
                     dist_attr_list.append(pickle.load(f))

         dist_attr_map = {}
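This file carries the third 2023-style rewrite: parentheses wrapping a single context manager add nothing, since with open(filepath, "rb") as f: already binds the file object, so black 23.3.0 strips them. A sketch under an assumed file name:

import pickle

# black 22.8.0 kept: with (open("vars_rank0.pkl", "rb")) as f:
# black 23.3.0 rewrites it to:
with open("vars_rank0.pkl", "rb") as f:  # hypothetical path
    vars_list = pickle.load(f)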

python/paddle/distributed/auto_parallel/static/cluster.py

Lines changed: 0 additions & 1 deletion

@@ -147,7 +147,6 @@ def __repr__(self):


 class Link:
-
     default_hop = 1
     default_nic_bandwidth = 24

python/paddle/distributed/auto_parallel/static/completion.py

Lines changed: 0 additions & 4 deletions

@@ -1257,7 +1257,6 @@ def _get_op_by_id(ops, id):

             # grad ops that have not a corresponding mapping in grad_op_id_to_op_id
             else:
-
                 if grad_op.type == 'sum':
                     assert all(map(_is_grad_var_name, grad_op.input_arg_names))
                     output_name = grad_op.output_arg_names[0]
@@ -1382,7 +1381,6 @@ def _get_op_by_id(ops, id):
             ]

         for idx in range(first_backward_op_idx, len(ops)):
-
             # complete the initial grad loss op
             if idx == first_backward_op_idx:
                 assert ops[idx].type == "fill_constant"
@@ -1656,7 +1654,6 @@ def complete_update_annotation(self, serial_main_program):
         learning_rate_completed = False

         for idx in range(len(ops)):
-
             # complete the annotation of the optimizer op.
             # TODO to add attribute for moment var
             op = ops[idx]
@@ -1823,7 +1820,6 @@ def complete_update_annotation(self, serial_main_program):
             )

         for input_name in op.desc.input_names():
-
             if input_name in [
                 'Param',
                 'Grad',

python/paddle/distributed/auto_parallel/static/cost_model.py

Lines changed: 0 additions & 1 deletion

@@ -316,7 +316,6 @@ def _parse_sub_program(self, program, nodes, graph, cost_data, sub_idx):
                 if pred.type == CostNodeType.COMPUTATION and (
                     pred_id in graph[node_id][SUCC]
                 ):
-
                     graph[pred_id][SUCC].remove(node_id)
                     graph[node_id][PRED].remove(pred_id)
