Skip to content

[pre-commit.ci] pre-commit autoupdate #61802

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
5 commits merged on Jul 7, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ ci:
skip: [pyright, mypy]
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.11.12
rev: v0.12.2
hooks:
- id: ruff
args: [--exit-non-zero-on-fix]
Expand Down Expand Up @@ -47,7 +47,7 @@ repos:
types_or: [python, rst, markdown, cython, c]
additional_dependencies: [tomli]
- repo: https://github.com/MarcoGorelli/cython-lint
rev: v0.16.6
rev: v0.16.7
hooks:
- id: cython-lint
- id: double-quote-cython-strings
Expand Down Expand Up @@ -95,14 +95,14 @@ repos:
- id: sphinx-lint
args: ["--enable", "all", "--disable", "line-too-long"]
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v20.1.5
rev: v20.1.7
hooks:
- id: clang-format
files: ^pandas/_libs/src|^pandas/_libs/include
args: [-i]
types_or: [c, c++]
- repo: https://github.com/trim21/pre-commit-mirror-meson
rev: v1.8.1
rev: v1.8.2
hooks:
- id: meson-fmt
args: ['--inplace']
Expand Down
30 changes: 15 additions & 15 deletions asv_bench/benchmarks/gil.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@
from .pandas_vb_common import BaseIO # isort:skip


def test_parallel(num_threads=2, kwargs_list=None):
def run_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.

Expand Down Expand Up @@ -95,7 +95,7 @@ def setup(self, threads, method):
{"key": np.random.randint(0, ngroups, size=N), "data": np.random.randn(N)}
)

@test_parallel(num_threads=threads)
@run_parallel(num_threads=threads)
def parallel():
getattr(df.groupby("key")["data"], method)()

Expand Down Expand Up @@ -123,7 +123,7 @@ def setup(self, threads):
ngroups = 10**3
data = Series(np.random.randint(0, ngroups, size=size))

@test_parallel(num_threads=threads)
@run_parallel(num_threads=threads)
def get_groups():
data.groupby(data).groups

Expand All @@ -142,7 +142,7 @@ def setup(self, dtype):
df = DataFrame({"col": np.arange(N, dtype=dtype)})
indexer = np.arange(100, len(df) - 100)

@test_parallel(num_threads=2)
@run_parallel(num_threads=2)
def parallel_take1d():
take_nd(df["col"].values, indexer)

Expand All @@ -163,7 +163,7 @@ def setup(self):
k = 5 * 10**5
kwargs_list = [{"arr": np.random.randn(N)}, {"arr": np.random.randn(N)}]

@test_parallel(num_threads=2, kwargs_list=kwargs_list)
@run_parallel(num_threads=2, kwargs_list=kwargs_list)
def parallel_kth_smallest(arr):
algos.kth_smallest(arr, k)

Expand All @@ -180,42 +180,42 @@ def setup(self):
self.period = self.dti.to_period("D")

def time_datetime_field_year(self):
@test_parallel(num_threads=2)
@run_parallel(num_threads=2)
def run(dti):
dti.year

run(self.dti)

def time_datetime_field_day(self):
@test_parallel(num_threads=2)
@run_parallel(num_threads=2)
def run(dti):
dti.day

run(self.dti)

def time_datetime_field_daysinmonth(self):
@test_parallel(num_threads=2)
@run_parallel(num_threads=2)
def run(dti):
dti.days_in_month

run(self.dti)

def time_datetime_field_normalize(self):
@test_parallel(num_threads=2)
@run_parallel(num_threads=2)
def run(dti):
dti.normalize()

run(self.dti)

def time_datetime_to_period(self):
@test_parallel(num_threads=2)
@run_parallel(num_threads=2)
def run(dti):
dti.to_period("s")

run(self.dti)

def time_period_to_datetime(self):
@test_parallel(num_threads=2)
@run_parallel(num_threads=2)
def run(period):
period.to_timestamp()

Expand All @@ -232,7 +232,7 @@ def setup(self, method):
if hasattr(DataFrame, "rolling"):
df = DataFrame(arr).rolling(win)

@test_parallel(num_threads=2)
@run_parallel(num_threads=2)
def parallel_rolling():
getattr(df, method)()

Expand All @@ -249,7 +249,7 @@ def parallel_rolling():
"std": rolling_std,
}

@test_parallel(num_threads=2)
@run_parallel(num_threads=2)
def parallel_rolling():
rolling[method](arr, win)

Expand Down Expand Up @@ -286,7 +286,7 @@ def setup(self, dtype):
self.fname = f"__test_{dtype}__.csv"
df.to_csv(self.fname)

@test_parallel(num_threads=2)
@run_parallel(num_threads=2)
def parallel_read_csv():
read_csv(self.fname)

Expand All @@ -305,7 +305,7 @@ class ParallelFactorize:
def setup(self, threads):
strings = Index([f"i-{i}" for i in range(100000)], dtype=object)

@test_parallel(num_threads=threads)
@run_parallel(num_threads=threads)
def parallel():
factorize(strings)

Expand Down
2 changes: 1 addition & 1 deletion pandas/core/frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -7235,7 +7235,7 @@ def sort_values(
indexer = lexsort_indexer(
keys_data, orders=ascending, na_position=na_position, key=key
)
elif len(by):
elif by:
# len(by) == 1

k = self._get_label_or_level_values(by[0], axis=axis)
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/indexes/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -7635,7 +7635,7 @@ def ensure_index(index_like: Axes, copy: bool = False) -> Index:
# check in clean_index_list
index_like = list(index_like)

if len(index_like) and lib.is_all_arraylike(index_like):
if index_like and lib.is_all_arraylike(index_like):
from pandas.core.indexes.multi import MultiIndex

return MultiIndex.from_arrays(index_like)
Expand Down
2 changes: 1 addition & 1 deletion pandas/io/parsers/base_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -243,7 +243,7 @@ def extract(r):
names.insert(single_ic, single_ic)

# Clean the column names (if we have an index_col).
if len(ic):
if ic:
col_names = [
r[ic[0]]
if ((r[ic[0]] is not None) and r[ic[0]] not in self.unnamed_cols)
Expand Down
2 changes: 1 addition & 1 deletion pandas/io/parsers/python_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -281,7 +281,7 @@ def read(

index: Index | None
columns: Sequence[Hashable] = list(self.orig_names)
if not len(content): # pragma: no cover
if not content: # pragma: no cover
# DataFrame with the right metadata, even though it's length 0
# error: Cannot determine type of 'index_col'
names = dedup_names(
Expand Down
3 changes: 1 addition & 2 deletions pandas/tests/arithmetic/test_datetime64.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
)
from itertools import (
product,
starmap,
)
import operator

Expand Down Expand Up @@ -2211,7 +2210,7 @@ def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):

def timedelta64(*args):
# see casting notes in NumPy gh-12927
return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
return np.sum(list(map(np.timedelta64, args, intervals)))

for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
Expand Down
2 changes: 1 addition & 1 deletion pandas/util/_tester.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
PKG = os.path.dirname(os.path.dirname(__file__))


def test(extra_args: list[str] | None = None, run_doctests: bool = False) -> None:
def test(extra_args: list[str] | None = None, run_doctests: bool = False) -> None: # noqa: PT028
"""
Run the pandas test suite using pytest.

Expand Down
2 changes: 2 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -318,6 +318,8 @@ ignore = [
"ISC001",
# if-stmt-min-max
"PLR1730",
# nan-comparison
"PLW0177",

### TODO: Enable gradually
# Useless statement
Expand Down
Loading