Add a quick fail-fast feedback action before the full tests are triggered #3541

Open · wants to merge 10 commits into base: main
57 changes: 49 additions & 8 deletions .github/workflows/build-test-linux-x86_64.yml
@@ -75,9 +75,50 @@ jobs:
          smoke-test-script: ${{ matrix.smoke-test-script }}
          trigger-event: ${{ github.event_name }}

  tests-py-fail-fast:
    name: Test fail fast [Python]
    needs: [filter-matrix, build]
    strategy:
      fail-fast: false
      matrix:
        include:
          - repository: pytorch/tensorrt
            package-name: torch_tensorrt
            pre-script: packaging/pre_build_script.sh
            post-script: packaging/post_build_script.sh
            smoke-test-script: packaging/smoke_test_script.sh
    uses: ./.github/workflows/linux-test.yml
    with:
      job-name: tests-py-critical-fail-fast
      repository: "pytorch/tensorrt"
      ref: ""
      test-infra-repository: pytorch/test-infra
      test-infra-ref: main
      build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
      pre-script: ${{ matrix.pre-script }}
      script: |
        export USE_HOST_DEPS=1
        export CI_BUILD=1
        export LD_LIBRARY_PATH=/usr/lib64:$LD_LIBRARY_PATH
        pushd .
        cd tests/py
        python -m pip install -r requirements.txt

        # test dynamo
        python -m pytest -m critical --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_backend_test_results.xml -n 4 dynamo/backend/
        python -m pytest -m critical -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_models_result.xml --ir dynamo dynamo/models/
        python -m pytest -m critical --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_automatic_plugin_results.xml dynamo/automatic_plugin/
        python -m pytest -m critical --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_partitioning_results.xml dynamo/partitioning/
        python -m pytest -m critical --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_lowering_results.xml dynamo/lowering/
        python -m pytest -m critical --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_runtime_results.xml dynamo/runtime/

        # test core
        python -m pytest -m critical --junitxml=${RUNNER_TEST_RESULTS_DIR}/core_test_results.xml core/
        popd

  tests-py-torchscript-fe:
    name: Test torchscript frontend [Python]
    needs: [filter-matrix, build]
    needs: [filter-matrix, build, tests-py-fail-fast]
    strategy:
      fail-fast: false
      matrix:
@@ -113,7 +154,7 @@ jobs:

  tests-py-dynamo-converters:
    name: Test dynamo converters [Python]
    needs: [filter-matrix, build]
    needs: [filter-matrix, build, tests-py-fail-fast]
    strategy:
      fail-fast: false
      matrix:
@@ -148,7 +189,7 @@ jobs:

  tests-py-dynamo-fe:
    name: Test dynamo frontend [Python]
    needs: [filter-matrix, build]
    needs: [filter-matrix, build, tests-py-fail-fast]
    strategy:
      fail-fast: false
      matrix:
@@ -180,7 +221,7 @@ jobs:

  tests-py-dynamo-serde:
    name: Test dynamo export serde [Python]
    needs: [filter-matrix, build]
    needs: [filter-matrix, build, tests-py-fail-fast]
    strategy:
      fail-fast: false
      matrix:
@@ -212,7 +253,7 @@ jobs:

  tests-py-torch-compile-be:
    name: Test torch compile backend [Python]
    needs: [filter-matrix, build]
    needs: [filter-matrix, build, tests-py-fail-fast]
    strategy:
      fail-fast: false
      matrix:
@@ -245,7 +286,7 @@ jobs:

  tests-py-dynamo-core:
    name: Test dynamo core [Python]
    needs: [filter-matrix, build]
    needs: [filter-matrix, build, tests-py-fail-fast]
    strategy:
      fail-fast: false
      matrix:
@@ -278,7 +319,7 @@ jobs:

  tests-py-dynamo-cudagraphs:
    name: Test dynamo cudagraphs [Python]
    needs: [filter-matrix, build]
    needs: [filter-matrix, build, tests-py-fail-fast]
    strategy:
      fail-fast: false
      matrix:
@@ -311,7 +352,7 @@ jobs:

  tests-py-core:
    name: Test core [Python]
    needs: [filter-matrix, build]
    needs: [filter-matrix, build, tests-py-fail-fast]
    strategy:
      fail-fast: false
      matrix:
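The new fail-fast job runs every pytest command with "-m critical", so only tests carrying that marker execute before the full matrix is dispatched. As a quick local sanity check, the selection can be previewed with a collect-only run; the sketch below is illustrative (not part of this PR) and assumes the packages from tests/py/requirements.txt, including pytest-cov required by the addopts in pyproject.toml, are installed:

# Illustrative local check (not part of this PR): list the tests that the
# fail-fast job's "-m critical" filter would select, without executing them.
import pytest

# --collect-only prints the selected test node IDs; -q keeps the listing terse.
exit_code = pytest.main(["-m", "critical", "--collect-only", "-q", "tests/py"])
print(f"pytest exited with {exit_code}")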
5 changes: 4 additions & 1 deletion .gitignore
@@ -75,4 +75,7 @@ tests/py/dynamo/models/*.ep
*.deb
*.tar.xz
MODULE.bazel.lock
*.whl
*.whl
.coverage
coverage.xml
*.log
44 changes: 44 additions & 0 deletions pyproject.toml
@@ -102,6 +102,50 @@ Changelog = "https://github.com/pytorch/tensorrt/releases"
package-dir = { "" = "py" }
include-package-data = false

[tool.pytest.ini_options]
testpaths = ["tests/py"]
addopts = "-ra --cov=py/torch_tensorrt --cov-report=html --cov-report=xml:coverage.xml --cov-config=pyproject.toml"

[tool.coverage.paths]
source = [
    "py/torch_tensorrt/",
    "*/site-packages/torch_tensorrt/"
]
omit = [
    "tests/*"
]

[tool.coverage.run]
relative_files=true
branch = true

[tool.coverage.report]
fail_under = 20
skip_covered = true
ignore_errors = true
exclude_lines = [
"pragma: no cover",
# Don't complain about missing debug or verbose code
"def __repr__",
"if verbose",
# Don't complain if tests don't hit defensive exception handling code
"raise AssertionError",
"raise NotImplementedError",
"raise RuntimeError",
"raise ValueError",
"raise KeyError",
"raise AttributeError",
"except ImportError",
# Don't complain if non-runnable code isn't run
"if __name__ == \"__main__\":",
"if TYPE_CHECKING:",
# Don't complain about abstract methods, they aren't run
"@(abc\\.)?abstractmethod",
]

[tool.coverage.html]
directory = "coverage_html"

[tool.uv]
package = true
environments = ["sys_platform == 'linux'", "sys_platform == 'windows'"]
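The pyproject.toml hunk above configures testpaths and coverage options, but the hunks shown here do not register the "critical" marker that the new workflow job selects on; unregistered markers cause pytest to emit PytestUnknownMarkWarning for every marked test. The sketch below is an assumption about how the registration could be done (it could equally be a markers list under [tool.pytest.ini_options]), not something taken from this PR:

# conftest.py sketch (hypothetical): register the "critical" marker so
# `pytest -m critical` selects tests cleanly and without unknown-marker warnings.
def pytest_configure(config):
    config.addinivalue_line(
        "markers",
        "critical: quick fail-fast subset run before the full test matrix",
    )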
5 changes: 3 additions & 2 deletions tests/py/core/test_classes.py
@@ -2,16 +2,17 @@
import unittest
from typing import Dict

import pytest
import tensorrt as trt
import torch
import torch_tensorrt
import torch_tensorrt as torchtrt
import torchvision.models as models
from torch_tensorrt.dynamo.runtime._TorchTensorRTModule import TorchTensorRTModule

import tensorrt as trt


class TestDevice(unittest.TestCase):
    @pytest.mark.critical
    def test_from_string_constructor(self):
        device = torchtrt.Device("cuda:0")
        self.assertEqual(device.device_type, torchtrt.DeviceType.GPU)
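The hunks in this PR attach the marker to individual test methods. If every test in a TestCase should join the fail-fast subset, the mark can also be applied once at class level; class-level marks on unittest-style classes are generally honored for "-m" selection, even though other pytest features such as fixtures and parametrization do not apply to unittest tests. A hypothetical example, not taken from this PR:

import unittest

import pytest


@pytest.mark.critical  # every test method in this class is selected by -m critical
class TestDeviceSmoke(unittest.TestCase):  # hypothetical class name, for illustration only
    def test_placeholder(self):
        self.assertTrue(True)  # placeholder body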
2 changes: 2 additions & 0 deletions tests/py/dynamo/automatic_plugin/test_automatic_plugin.py
@@ -1,5 +1,6 @@
from typing import Tuple

import pytest
import torch
import torch.nn as nn
import torch_tensorrt
@@ -66,6 +67,7 @@ class TestAutomaticPlugin(DispatchTestCase):
            ((256, 256), torch.int),
        ]
    )
    @pytest.mark.critical
    def test_mul_plugin_float(self, input_shape, dtype):
        class elementwise_mul(nn.Module):
            def forward(self, lhs, rhs):
2 changes: 2 additions & 0 deletions tests/py/dynamo/backend/test_backend_compiler.py
@@ -1,6 +1,7 @@
# type: ignore
from copy import deepcopy

import pytest
import torch
import torch_tensorrt
from torch.testing._internal.common_utils import TestCase, run_tests
@@ -10,6 +11,7 @@


class TestTRTModuleNextCompilation(TestCase):
    @pytest.mark.critical
    def test_trt_module_next_full_support(self):
        class FullySupportedMultiOp(torch.nn.Module):
            def forward(self, x, y):
2 changes: 2 additions & 0 deletions tests/py/dynamo/distributed/test_nccl_ops.py
@@ -1,5 +1,6 @@
import os

import pytest
import torch
import torch.distributed as dist
import torch.nn as nn
@@ -17,6 +18,7 @@


class TestGatherNcclOpsConverter(DispatchTestCase):
    @pytest.mark.critical
    @parameterized.expand([8])
    def test_nccl_ops(self, linear_layer_dim):
        class DistributedGatherModel(nn.Module):
3 changes: 3 additions & 0 deletions tests/py/dynamo/lowering/test_aten_lowering_passes.py
@@ -1,3 +1,4 @@
import pytest
import torch
import torch_tensorrt
from torch.testing._internal.common_utils import TestCase, run_tests
@@ -6,6 +7,7 @@


class TestInputAsOutput(TestCase):
    @pytest.mark.critical
    def test_input_as_output(self):
        class InputAsOutput(torch.nn.Module):
            def forward(self, x, y):
@@ -56,6 +58,7 @@ def forward(self, x, y):


class TestLoweringPassMembership(TestCase):
    @pytest.mark.critical
    def insert_at_end(self):
        from torch_tensorrt.dynamo.lowering.passes import (
            ATEN_LOWERING_PASSES,
1 change: 1 addition & 0 deletions tests/py/dynamo/lowering/test_decompositions.py
@@ -14,6 +14,7 @@


class TestLowering(TestCase):
    @pytest.mark.critical
    def test_lowering_inplace_op(self):
        class InPlace(torch.nn.Module):
            def __init__(self, *args, **kwargs) -> None:
2 changes: 2 additions & 0 deletions tests/py/dynamo/models/test_dyn_models.py
@@ -11,6 +11,7 @@
assertions = unittest.TestCase()


@pytest.mark.critical
@pytest.mark.unit
def test_base_dynamic(ir):
"""
@@ -175,6 +176,7 @@ def forward(self, x):
    )


@pytest.mark.critical
@pytest.mark.unit
def test_resnet_dynamic(ir):
    """
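Tests such as test_base_dynamic and test_resnet_dynamic above accept an ir argument, and the fail-fast job invokes them with "--ir dynamo". pytest only understands such a flag if the suite registers it, typically in a conftest.py via pytest_addoption together with a fixture that hands the value to the tests. The sketch below shows the usual wiring; the option name matches the workflow command, but the choices, default, and fixture details are assumptions rather than the repository's actual conftest:

# conftest.py sketch (assumed wiring, not copied from the repository):
# expose a --ir command-line option and an `ir` fixture that tests consume.
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--ir",
        type=str,
        default="dynamo",
        choices=["dynamo", "torch_compile", "ts"],  # assumed IR names
        help="Which torch_tensorrt IR the model tests should compile with",
    )


@pytest.fixture(scope="session")
def ir(request):
    # Hand the selected IR string to any test that declares an `ir` parameter.
    return request.config.getoption("--ir")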
1 change: 1 addition & 0 deletions tests/py/dynamo/models/test_engine_cache.py
@@ -57,6 +57,7 @@ def load(self, hash: str, prefix: str = "blob") -> Optional[bytes]:


class TestHashFunction(TestCase):
    @pytest.mark.critical
    def test_reexport_is_equal(self):
        pyt_model = models.resnet18(pretrained=True).eval().to("cuda")
        example_inputs = (torch.randn((100, 3, 224, 224)).to("cuda"),)
2 changes: 2 additions & 0 deletions tests/py/dynamo/models/test_export_kwargs_serde.py
@@ -23,6 +23,7 @@


@pytest.mark.unit
@pytest.mark.critical
def test_custom_model():
    class net(nn.Module):
        def __init__(self):
@@ -83,6 +84,7 @@ def forward(self, x, b=5, c=None, d=None):


@pytest.mark.unit
@pytest.mark.critical
def test_custom_model_with_dynamo_trace():
    class net(nn.Module):
        def __init__(self):
1 change: 1 addition & 0 deletions tests/py/dynamo/models/test_export_serde.py
@@ -17,6 +17,7 @@
trt_ep_path = os.path.join(tempfile.gettempdir(), "trt.ep")


@pytest.mark.critical
@pytest.mark.unit
def test_base_full_compile(ir):
"""
1 change: 1 addition & 0 deletions tests/py/dynamo/models/test_models.py
@@ -85,6 +85,7 @@ def test_resnet18_cpu_offload(ir):


@pytest.mark.unit
@pytest.mark.critical
def test_mobilenet_v2(ir):
    model = models.mobilenet_v2(pretrained=True).eval().to("cuda")
    input = torch.randn((1, 3, 224, 224)).to("cuda")
2 changes: 2 additions & 0 deletions tests/py/dynamo/partitioning/test_fast_partitioning.py
@@ -1,6 +1,7 @@
from copy import deepcopy

import numpy as np
import pytest
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch_tensorrt.dynamo import partitioning
@@ -55,6 +56,7 @@ def forward(self, x, y):
            "Single operators can be segmented if full compilation is required",
        )

    @pytest.mark.critical
    def test_partition_fully_supported_multi_op(self):
        class FullySupportedMultiOp(torch.nn.Module):
            def __init__(self, *args, **kwargs) -> None:
1 change: 1 addition & 0 deletions tests/py/dynamo/partitioning/test_global_partitioning.py
@@ -95,6 +95,7 @@ def forward(self, x, y):
"Single operators can be segmented if full compilation is required",
)

@pytest.mark.critical
def test_partition_fully_supported_multi_op(self):
class FullySupportedMultiOp(torch.nn.Module):
def __init__(self, *args, **kwargs) -> None:
@@ -1,14 +1,15 @@
import unittest

import pytest
import tensorrt as trt
import torch
import torch_tensorrt
from torch_tensorrt.dynamo.runtime import PythonTorchTensorRTModule
from torch_tensorrt.dynamo.utils import COSINE_THRESHOLD, cosine_similarity

import tensorrt as trt


class TestConvertModuleToTrtEngine(unittest.TestCase):
    @pytest.mark.critical
    def test_convert_module(self):
        class Test(torch.nn.Module):
            def forward(self, a, b):
2 changes: 2 additions & 0 deletions tests/py/dynamo/runtime/test_output_allocator.py
@@ -48,6 +48,7 @@ class TestOutputAllocatorStaticModel(TestCase):
            ("cpp_runtime", False),
        ]
    )
    @pytest.mark.critical
    def test_cudagraphs_and_output_allocator(self, _, use_python_runtime):
        model = StaticModel().eval().cuda()
        inputs = [torch.randn((2, 3), dtype=torch.float).cuda()]
@@ -157,6 +158,7 @@ class TestOutputAllocatorDDSModel(TestCase):
            ("cpp_runtime", False),
        ]
    )
    @pytest.mark.critical
    def test_cudagraphs_and_output_allocator(self, _, use_python_runtime):
        model = DDSModel().eval().cuda()
        inputs = (torch.randint(low=0, high=3, size=(10,), dtype=torch.int).to("cuda"),)
2 changes: 2 additions & 0 deletions tests/py/dynamo/runtime/test_pre_allocated_outputs.py
@@ -1,3 +1,4 @@
import pytest
import torch
import torch_tensorrt as torchtrt
from parameterized import parameterized
@@ -14,6 +15,7 @@ class TestPreAllocatedOutputs(TestCase):
            ("cpp_runtime", False),
        ]
    )
    @pytest.mark.critical
    def test_pre_allocated_outputs_default(self, _, use_python_runtime):
        class SampleModel(torch.nn.Module):
            def forward(self, x):