Skip to content

Commit 84b80af

Browse files
committed
Merge branch 'main' of github.com:huggingface/transformers into gemma3n-fixes
2 parents 3eda455 + 2bcf9f6 commit 84b80af

File tree

793 files changed

+28156
-2315
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

793 files changed

+28156
-2315
lines changed

.circleci/create_circleci_config.py

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,9 @@ def __post_init__(self):
109109
self.docker_image[0]["image"] = f"{self.docker_image[0]['image']}:dev"
110110
print(f"Using {self.docker_image} docker image")
111111
if self.install_steps is None:
112-
self.install_steps = ["uv venv && uv pip install ."]
112+
self.install_steps = ["uv pip install ."]
113+
# Use a custom patched pytest to force exit the process at the end, to avoid `Too long with no output (exceeded 10m0s): context deadline exceeded`
114+
self.install_steps.append("uv pip install git+https://github.com/ydshieh/pytest.git@8.4.1-ydshieh")
113115
if self.pytest_options is None:
114116
self.pytest_options = {}
115117
if isinstance(self.tests_to_run, str):
@@ -213,7 +215,7 @@ def job_name(self):
213215
docker_image=[{"image": "huggingface/transformers-torch-light"}],
214216
# networkx==3.3 (after #36957) cause some issues
215217
# TODO: remove this once it works directly
216-
install_steps=["uv venv && uv pip install ."],
218+
install_steps=["uv pip install ."],
217219
marker="generate",
218220
parallelism=6,
219221
)
@@ -250,7 +252,7 @@ def job_name(self):
250252
additional_env={"OMP_NUM_THREADS": 8},
251253
docker_image=[{"image":"huggingface/transformers-examples-torch"}],
252254
# TODO @ArthurZucker remove this once docker is easier to build
253-
install_steps=["uv venv && uv pip install . && uv pip install -r examples/pytorch/_tests_requirements.txt"],
255+
install_steps=["uv pip install . && uv pip install -r examples/pytorch/_tests_requirements.txt"],
254256
pytest_num_workers=4,
255257
)
256258

@@ -259,7 +261,7 @@ def job_name(self):
259261
additional_env={"HUGGINGFACE_CO_STAGING": True},
260262
docker_image=[{"image":"huggingface/transformers-torch-light"}],
261263
install_steps=[
262-
'uv venv && uv pip install .',
264+
'uv pip install .',
263265
'git config --global user.email "ci@dummy.com"',
264266
'git config --global user.name "ci"',
265267
],
@@ -273,7 +275,6 @@ def job_name(self):
273275
"onnx",
274276
docker_image=[{"image":"huggingface/transformers-torch-tf-light"}],
275277
install_steps=[
276-
"uv venv",
277278
"uv pip install .[testing,sentencepiece,onnxruntime,vision,rjieba]",
278279
],
279280
pytest_options={"k onnx": None},
@@ -303,7 +304,7 @@ def job_name(self):
303304
docker_image=[{"image": "huggingface/transformers-torch-light"}],
304305
# networkx==3.3 (after #36957) cause some issues
305306
# TODO: remove this once it works directly
306-
install_steps=["uv venv && uv pip install .[serving]"],
307+
install_steps=["uv pip install .[serving]"],
307308
marker="not generate",
308309
parallelism=6,
309310
)
@@ -321,7 +322,7 @@ def job_name(self):
321322
additional_env={"TRANSFORMERS_VERBOSITY": "error", "DATASETS_VERBOSITY": "error", "SKIP_CUDA_DOCTEST": "1"},
322323
install_steps=[
323324
# Add an empty file to keep the test step running correctly even no file is selected to be tested.
324-
"uv venv && pip install .",
325+
"uv pip install .",
325326
"touch dummy.py",
326327
command,
327328
"cat pr_documentation_tests_temp.txt",

.github/workflows/self-scheduled-amd-mi300-caller.yml

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,10 @@ name: Self-hosted runner scale set (AMD mi300 scheduled CI caller)
55
# 2gpu scale set: amd-mi300-ci-2gpu
66

77
on:
8-
workflow_run:
9-
workflows: ["Self-hosted runner (AMD scheduled CI caller)"]
10-
branches: ["main"]
11-
types: [completed]
8+
#workflow_run:
9+
# workflows: ["Self-hosted runner (AMD scheduled CI caller)"]
10+
# branches: ["main"]
11+
# types: [completed]
1212
push:
1313
branches:
1414
- run_amd_scheduled_ci_caller*
@@ -24,6 +24,7 @@ jobs:
2424
docker: huggingface/transformers-pytorch-amd-gpu
2525
ci_event: Scheduled CI (AMD) - mi300
2626
report_repo_id: optimum-amd/transformers_daily_ci
27+
env_file: /etc/podinfo/gha-gpu-isolation-settings
2728
secrets: inherit
2829

2930
torch-pipeline:
@@ -36,6 +37,7 @@ jobs:
3637
docker: huggingface/transformers-pytorch-amd-gpu
3738
ci_event: Scheduled CI (AMD) - mi300
3839
report_repo_id: optimum-amd/transformers_daily_ci
40+
env_file: /etc/podinfo/gha-gpu-isolation-settings
3941
secrets: inherit
4042

4143
example-ci:
@@ -48,6 +50,7 @@ jobs:
4850
docker: huggingface/transformers-pytorch-amd-gpu
4951
ci_event: Scheduled CI (AMD) - mi300
5052
report_repo_id: optimum-amd/transformers_daily_ci
53+
env_file: /etc/podinfo/gha-gpu-isolation-settings
5154
secrets: inherit
5255

5356
deepspeed-ci:
@@ -60,4 +63,5 @@ jobs:
6063
docker: huggingface/transformers-pytorch-deepspeed-amd-gpu
6164
ci_event: Scheduled CI (AMD) - mi300
6265
report_repo_id: optimum-amd/transformers_daily_ci
66+
env_file: /etc/podinfo/gha-gpu-isolation-settings
6367
secrets: inherit

.github/workflows/self-scheduled-amd-mi325-caller.yml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ jobs:
2424
docker: huggingface/transformers-pytorch-amd-gpu
2525
ci_event: Scheduled CI (AMD) - mi325
2626
report_repo_id: optimum-amd/transformers_daily_ci
27+
env_file: /etc/podinfo/gha-gpu-isolation-settings
2728
secrets: inherit
2829

2930
torch-pipeline:
@@ -36,6 +37,7 @@ jobs:
3637
docker: huggingface/transformers-pytorch-amd-gpu
3738
ci_event: Scheduled CI (AMD) - mi325
3839
report_repo_id: optimum-amd/transformers_daily_ci
40+
env_file: /etc/podinfo/gha-gpu-isolation-settings
3941
secrets: inherit
4042

4143
example-ci:
@@ -48,6 +50,7 @@ jobs:
4850
docker: huggingface/transformers-pytorch-amd-gpu
4951
ci_event: Scheduled CI (AMD) - mi325
5052
report_repo_id: optimum-amd/transformers_daily_ci
53+
env_file: /etc/podinfo/gha-gpu-isolation-settings
5154
secrets: inherit
5255

5356
deepspeed-ci:
@@ -60,4 +63,5 @@ jobs:
6063
docker: huggingface/transformers-pytorch-deepspeed-amd-gpu
6164
ci_event: Scheduled CI (AMD) - mi325
6265
report_repo_id: optimum-amd/transformers_daily_ci
66+
env_file: /etc/podinfo/gha-gpu-isolation-settings
6367
secrets: inherit
Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
name: Self-hosted runner scale set (AMD mi355 scheduled CI caller)
2+
3+
# Note: For every job in this workflow, the name of the runner scale set is finalized in the runner yaml i.e. huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml
4+
# For example, 1gpu : amd-mi355-ci-1gpu
5+
# 2gpu : amd-mi355-ci-2gpu
6+
7+
on:
8+
workflow_run:
9+
workflows: ["Self-hosted runner (AMD scheduled CI caller)"]
10+
branches: ["main"]
11+
types: [completed]
12+
push:
13+
branches:
14+
- run_amd_scheduled_ci_caller*
15+
16+
jobs:
17+
model-ci:
18+
name: Model CI
19+
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main
20+
with:
21+
job: run_models_gpu
22+
slack_report_channel: "#amd-hf-ci"
23+
runner_scale_set: amd-mi355-ci
24+
docker: huggingface/transformers-pytorch-amd-gpu
25+
ci_event: Scheduled CI (AMD) - mi355
26+
report_repo_id: optimum-amd/transformers_daily_ci
27+
secrets: inherit
28+
29+
torch-pipeline:
30+
name: Torch pipeline CI
31+
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main
32+
with:
33+
job: run_pipelines_torch_gpu
34+
slack_report_channel: "#amd-hf-ci"
35+
runner_scale_set: amd-mi355-ci
36+
docker: huggingface/transformers-pytorch-amd-gpu
37+
ci_event: Scheduled CI (AMD) - mi355
38+
report_repo_id: optimum-amd/transformers_daily_ci
39+
secrets: inherit
40+
41+
example-ci:
42+
name: Example CI
43+
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main
44+
with:
45+
job: run_examples_gpu
46+
slack_report_channel: "#amd-hf-ci"
47+
runner_scale_set: amd-mi355-ci
48+
docker: huggingface/transformers-pytorch-amd-gpu
49+
ci_event: Scheduled CI (AMD) - mi355
50+
report_repo_id: optimum-amd/transformers_daily_ci
51+
secrets: inherit
52+
53+
deepspeed-ci:
54+
name: DeepSpeed CI
55+
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main
56+
with:
57+
job: run_torch_cuda_extensions_gpu
58+
slack_report_channel: "#amd-hf-ci"
59+
runner_scale_set: amd-mi355-ci
60+
docker: huggingface/transformers-pytorch-deepspeed-amd-gpu
61+
ci_event: Scheduled CI (AMD) - mi355
62+
report_repo_id: optimum-amd/transformers_daily_ci
63+
secrets: inherit

Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ repo-consistency:
5252
python utils/check_doctest_list.py
5353
python utils/update_metadata.py --check-only
5454
python utils/check_docstrings.py
55+
python utils/add_dates.py
5556

5657
# this target runs checks on all files
5758

benchmark/.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
benchmark_results/

0 commit comments

Comments (0)