Commit 57e230c

Fix more pylint warnings (#40204)
Fix pylint warnings

Signed-off-by: cyy <cyyever@outlook.com>
1 parent 47938f8 commit 57e230c

14 files changed, +17 -26 lines changed
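The changes fall into a few recurring pylint patterns: self-assignments replaced by pass or dropped, else blocks removed after a branch that already returns or raises, unused global declarations deleted, and a non-string environment-variable default quoted. A rough before/after sketch of those shapes (hypothetical function, not taken from the diff):

    def lookup_before(value, mapping):
        value = value                  # self-assigning-variable: a no-op pylint flags
        if value in mapping:
            return mapping[value]
        else:                          # no-else-raise: the else only adds nesting
            raise KeyError(value)

    def lookup_after(value, mapping):
        if value in mapping:
            return mapping[value]
        raise KeyError(value)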

src/transformers/image_transforms.py

Lines changed: 1 addition & 1 deletion
@@ -748,7 +748,7 @@ def _expand_for_data_format(values):
        elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):
            values = (values, values)
        elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):
-            values = values
+            pass
        else:
            raise ValueError(f"Unsupported format: {values}")

src/transformers/integrations/mxfp4.py

Lines changed: 1 addition & 1 deletion
@@ -228,7 +228,7 @@ def routing_torch_dist(

    with torch.cuda.device(logits.device):
        world_size = torch.distributed.get_world_size()
-        rank = int(os.environ.get("LOCAL_RANK", 0))
+        rank = int(os.environ.get("LOCAL_RANK", "0"))
        replace_value = -1

        n_tokens = logits.shape[0]
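For context (not part of the diff): values in os.environ are always strings, so pylint's invalid-envvar-default check asks that the fallback passed to os.environ.get be a string as well. A minimal sketch:

    import os

    # Environment variables are always strings; a string default means int()
    # receives the same type whether or not LOCAL_RANK is set.
    rank = int(os.environ.get("LOCAL_RANK", "0"))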

src/transformers/modeling_flash_attention_utils.py

Lines changed: 0 additions & 1 deletion
@@ -112,7 +112,6 @@ def _lazy_define_process_function(flash_function):
    NOTE: While all supported kwargs are marked as `True`, everything else is marked as `False`.
    This might be confusing for kwargs that we use in any case, e.g. `is_causal`.
    """
-    global _process_flash_kwargs_fn, _hf_api_to_flash_mapping

    flash_parameters = inspect.signature(flash_function).parameters
    process_parameters = inspect.signature(_process_flash_attention_kwargs).parameters
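Side note: global is only needed when a function rebinds a module-level name; merely reading one requires no declaration, which is why pylint reports the removed statement as global-variable-not-assigned. A small sketch with hypothetical names:

    _REGISTRY = {}  # hypothetical module-level state

    def read_entry(key):
        # Reading a module-level name needs no global declaration.
        return _REGISTRY.get(key)

    def replace_registry(new_registry):
        global _REGISTRY  # required only because the name is rebound
        _REGISTRY = new_registry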

src/transformers/modeling_rope_utils.py

Lines changed: 1 addition & 1 deletion
@@ -257,7 +257,7 @@ def find_correction_range(low_rot, high_rot, dim, base, max_position_embeddings,
    low = find_correction_dim(low_rot, dim, base, max_position_embeddings)
    high = find_correction_dim(high_rot, dim, base, max_position_embeddings)
    if truncate:
-        low = low = math.floor(low)
+        low = math.floor(low)
        high = math.ceil(high)
    return max(low, 0), min(high, dim - 1)

src/transformers/modeling_utils.py

Lines changed: 1 addition & 2 deletions
@@ -409,8 +409,7 @@ def get_state_dict_dtype(state_dict):
            return t.dtype

    # if no floating dtype was found return whatever the first dtype is
-    else:
-        return next(state_dict.values()).dtype
+    return next(state_dict.values()).dtype


def load_sharded_checkpoint(model, folder, strict=True, prefer_safe=True):

src/transformers/pipelines/table_question_answering.py

Lines changed: 0 additions & 1 deletion
@@ -267,7 +267,6 @@ def sequential_inference(self, **inputs):
            )

            coords_to_probs = collections.defaultdict(list)
-            token_type_ids_example = token_type_ids_example
            for i, p in enumerate(tf.squeeze(probabilities).numpy().tolist()):
                segment_id = token_type_ids_example[:, 0].tolist()[i]
                col = token_type_ids_example[:, 1].tolist()[i] - 1

src/transformers/processing_utils.py

Lines changed: 6 additions & 7 deletions
@@ -1391,12 +1391,11 @@ def get_possibly_dynamic_module(module_name):
                return custom_subclass
            elif custom_class is not None and custom_class.__name__ == module_name:
                return custom_class
-            else:
-                raise ValueError(
-                    f"Could not find module {module_name} in `transformers`. If this is a custom class, "
-                    f"it should be registered using the relevant `AutoClass.register()` function so that "
-                    f"other functions can find it!"
-                )
+            raise ValueError(
+                f"Could not find module {module_name} in `transformers`. If this is a custom class, "
+                f"it should be registered using the relevant `AutoClass.register()` function so that "
+                f"other functions can find it!"
+            )

    @property
    def model_input_names(self):
@@ -1469,7 +1468,7 @@ def apply_chat_template(
                chat_template = self.chat_template[chat_template]
            else:
                # It's a template string, render it directly
-                chat_template = chat_template
+                pass

        is_tokenizers_fast = hasattr(self, "tokenizer") and self.tokenizer.__class__.__name__.endswith("Fast")

src/transformers/utils/auto_docstring.py

Lines changed: 2 additions & 3 deletions
@@ -1120,9 +1120,8 @@ def get_model_name(obj):
        if file_name.startswith(start) and file_name.endswith(end):
            model_name_lowercase = file_name[len(start) : -len(end)]
            return model_name_lowercase
-        else:
-            print(f"🚨 Something went wrong trying to find the model name in the path: {path}")
-            return "model"
+        print(f"🚨 Something went wrong trying to find the model name in the path: {path}")
+        return "model"


def get_placeholders_dict(placeholders: list, model_name: str) -> dict:

src/transformers/utils/fx.py

Lines changed: 0 additions & 1 deletion
@@ -815,7 +815,6 @@ def _proxies_to_metas(v):

def create_cache_proxy_factory_fn(orig_cache_cls: type[Cache]) -> Callable[[Node], HFCacheProxy]:
    def cache_proxy_factory_fn(n: Node) -> HFCacheProxy:
-        global _CURRENT_TRACER
        if not isinstance(_CURRENT_TRACER, HFTracer):
            raise RuntimeError("Cannot create HFCacheProxy because there is no HFTracer currently tracing.")
        cache_proxy = HFCacheProxy(n, _CURRENT_TRACER)

src/transformers/utils/generic.py

Lines changed: 1 addition & 2 deletions
@@ -746,8 +746,7 @@ def infer_framework(model_class):
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
-    else:
-        raise TypeError(f"Could not infer framework from class {model_class}.")
+    raise TypeError(f"Could not infer framework from class {model_class}.")


def torch_int(x):
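Several of the hunks above remove else blocks that follow a return or raise in every earlier branch; when the else is attached to a loop that always returns, pylint reports useless-else-on-loop, and otherwise no-else-return or no-else-raise. Either way the flatter form behaves identically. A hypothetical sketch of the resulting shape:

    def pick_backend(candidates):
        for name in candidates:
            if name in ("pt", "tf", "flax"):
                return name
        # No else on the loop: reaching this line already means nothing matched.
        raise TypeError(f"Could not infer a backend from {candidates}.")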
