triton_kernels
bea27e3
1 parent 57c0faf commit ecbe70b
python/triton_kernels/tests/conftest.py
@@ -37,4 +37,5 @@ def pytest_configure(config):
     if worker_id is not None and worker_id.startswith("gw"):
         import torch
         gpu_id = int(worker_id[2:])  # map gw0 → 0, gw1 → 1, ...
-        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id % torch.cuda.device_count())
+        if torch.cuda.is_available():
+            os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id % torch.cuda.device_count())
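For context, a minimal sketch of how this guard fits into a pytest-xdist conftest.py. Only the hunk above is shown in the commit; how worker_id is obtained (here, from the PYTEST_XDIST_WORKER environment variable that pytest-xdist sets in worker processes) is an assumption, not part of the diff. The guard matters because on a machine without CUDA, torch.cuda.device_count() is 0 and gpu_id % 0 would raise ZeroDivisionError.

# Hypothetical, self-contained sketch of the per-worker GPU pinning pattern.
# Only the torch.cuda.is_available() guard around CUDA_VISIBLE_DEVICES comes
# from the diff; the worker_id lookup below is an assumption for illustration.
import os


def pytest_configure(config):
    # pytest-xdist names its workers gw0, gw1, ...; the variable is unset
    # when tests run without xdist, so the block is skipped in that case.
    worker_id = os.environ.get("PYTEST_XDIST_WORKER")
    if worker_id is not None and worker_id.startswith("gw"):
        import torch

        gpu_id = int(worker_id[2:])  # map gw0 → 0, gw1 → 1, ...
        # Only pin a device when CUDA is actually present, so CPU-only
        # environments don't fail on the modulo over a zero device count.
        if torch.cuda.is_available():
            os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id % torch.cuda.device_count())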