
Commit cbc8ced

[CI] Fix big GPU test marker (#11786)

* update
* update

1 parent 01240fe commit cbc8ced

13 files changed, +9 -23 lines changed

.github/workflows/nightly_tests.yml

Lines changed: 1 addition & 1 deletion

@@ -248,7 +248,7 @@ jobs:
           BIG_GPU_MEMORY: 40
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -m "big_gpu_with_torch_cuda" \
+            -m "big_accelerator" \
             --make-reports=tests_big_gpu_torch_cuda \
             --report-log=tests_big_gpu_torch_cuda.log \
             tests/

src/diffusers/utils/testing_utils.py

Lines changed: 4 additions & 0 deletions

@@ -421,6 +421,10 @@ def require_big_accelerator(test_case):
     Decorator marking a test that requires a bigger hardware accelerator (24GB) for execution. Some example pipelines:
     Flux, SD3, Cog, etc.
     """
+    import pytest
+
+    test_case = pytest.mark.big_accelerator(test_case)
+
     if not is_torch_available():
         return unittest.skip("test requires PyTorch")(test_case)
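With the CI job now selecting `-m "big_accelerator"`, the decorator itself attaches that marker before applying its skip conditions. The following is a minimal sketch of the pattern, not the actual diffusers implementation; the helper `has_big_accelerator` is hypothetical and stands in for the real torch/device-memory checks.

import unittest

import pytest


def has_big_accelerator() -> bool:
    # Hypothetical stand-in for the real checks (torch availability, a CUDA device,
    # and a memory threshold such as BIG_GPU_MEMORY).
    import torch

    return torch.cuda.is_available() and torch.cuda.get_device_properties(0).total_memory >= 24 * 1024**3


def require_big_accelerator(test_case):
    # Attach the marker first so `pytest -m "big_accelerator"` can still select the
    # test even when it ends up being skipped on smaller machines.
    test_case = pytest.mark.big_accelerator(test_case)
    if not has_big_accelerator():
        return unittest.skip("test requires a big hardware accelerator")(test_case)
    return test_case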

tests/conftest.py

Lines changed: 4 additions & 0 deletions

@@ -30,6 +30,10 @@
 warnings.simplefilter(action="ignore", category=FutureWarning)


+def pytest_configure(config):
+    config.addinivalue_line("markers", "big_accelerator: marks tests as requiring big accelerator resources")
+
+
 def pytest_addoption(parser):
     from diffusers.utils.testing_utils import pytest_addoption_shared
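Registering the marker in `pytest_configure` makes `big_accelerator` a known marker, so `-m "big_accelerator"` selects tests cleanly instead of triggering unknown-mark warnings (or errors under `--strict-markers`). A small self-contained illustration, with hypothetical file and test names; only the marker string comes from the diff above.

# conftest.py
def pytest_configure(config):
    # Register the custom marker so pytest recognizes it.
    config.addinivalue_line("markers", "big_accelerator: marks tests as requiring big accelerator resources")


# test_example.py (hypothetical)
import pytest


@pytest.mark.big_accelerator
def test_runs_only_on_big_hardware():
    assert True


# Selected in CI with: python -m pytest -m "big_accelerator" tests/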

tests/lora/test_lora_layers_flux.py

Lines changed: 0 additions & 3 deletions

@@ -20,7 +20,6 @@
 import unittest

 import numpy as np
-import pytest
 import safetensors.torch
 import torch
 from parameterized import parameterized

@@ -813,7 +812,6 @@ def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxLoRAIntegrationTests(unittest.TestCase):
     """internal note: The integration slices were obtained on audace.

@@ -960,7 +958,6 @@ def test_flux_xlabs_load_lora_with_single_blocks(self):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxControlLoRAIntegrationTests(unittest.TestCase):
     num_inference_steps = 10
     seed = 0
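Because `require_big_accelerator` now applies `pytest.mark.big_accelerator` itself, the explicit mark and the `import pytest` line become redundant in the test modules, as in this file and the ones below. A sketch of the resulting usage, with an illustrative class name:

import unittest

from diffusers.utils.testing_utils import require_big_accelerator


@require_big_accelerator  # marks the class with big_accelerator and skips it on small machines
class ExampleIntegrationTests(unittest.TestCase):
    def test_inference(self):
        ...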

tests/lora/test_lora_layers_hunyuanvideo.py

Lines changed: 0 additions & 2 deletions

@@ -17,7 +17,6 @@
 import unittest

 import numpy as np
-import pytest
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast


@@ -198,7 +197,6 @@ def test_simple_inference_with_text_lora_save_load(self):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class HunyuanVideoLoRAIntegrationTests(unittest.TestCase):
     """internal note: The integration slices were obtained on DGX.

tests/lora/test_lora_layers_sd3.py

Lines changed: 0 additions & 2 deletions

@@ -17,7 +17,6 @@
 import unittest

 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel


@@ -139,7 +138,6 @@ def test_multiple_wrong_adapter_name_raises_error(self):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class SD3LoraIntegrationTests(unittest.TestCase):
     pipeline_class = StableDiffusion3Img2ImgPipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"

tests/pipelines/controlnet_flux/test_controlnet_flux.py

Lines changed: 0 additions & 2 deletions

@@ -17,7 +17,6 @@
 import unittest

 import numpy as np
-import pytest
 import torch
 from huggingface_hub import hf_hub_download
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast

@@ -211,7 +210,6 @@ def test_flux_image_output_shape(self):

 @nightly
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxControlNetPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxControlNetPipeline

tests/pipelines/controlnet_sd3/test_controlnet_sd3.py

Lines changed: 0 additions & 2 deletions

@@ -18,7 +18,6 @@
 from typing import Optional

 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel


@@ -221,7 +220,6 @@ def test_xformers_attention_forwardGenerator_pass(self):

 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class StableDiffusion3ControlNetPipelineSlowTests(unittest.TestCase):
     pipeline_class = StableDiffusion3ControlNetPipeline

tests/pipelines/flux/test_pipeline_flux.py

Lines changed: 0 additions & 3 deletions

@@ -2,7 +2,6 @@
 import unittest

 import numpy as np
-import pytest
 import torch
 from huggingface_hub import hf_hub_download
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel

@@ -224,7 +223,6 @@ def test_flux_true_cfg(self):

 @nightly
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxPipeline
     repo_id = "black-forest-labs/FLUX.1-schnell"

@@ -312,7 +310,6 @@ def test_flux_inference(self):

 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxIPAdapterPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxPipeline
     repo_id = "black-forest-labs/FLUX.1-dev"

tests/pipelines/flux/test_pipeline_flux_redux.py

Lines changed: 0 additions & 2 deletions

@@ -2,7 +2,6 @@
 import unittest

 import numpy as np
-import pytest
 import torch

 from diffusers import FluxPipeline, FluxPriorReduxPipeline

@@ -19,7 +18,6 @@

 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxReduxSlowTests(unittest.TestCase):
     pipeline_class = FluxPriorReduxPipeline
     repo_id = "black-forest-labs/FLUX.1-Redux-dev"
