 jobs:
   setup_torch_cuda_pipeline_matrix:
     name: Setup Torch Pipelines CUDA Slow Tests Matrix
-    runs-on: [ self-hosted, intel-cpu, 8-cpu, ci ]
+    runs-on:
+      group: aws-general-8-plus
     container:
       image: diffusers/diffusers-pytorch-cpu
     outputs:

       max-parallel: 8
       matrix:
         module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
-    runs-on: [single-gpu, nvidia-gpu, t4, ci]
+    runs-on:
+      group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
       options: --shm-size "16gb" --ipc host --gpus 0

@@ -101,7 +103,8 @@ jobs:

   torch_cuda_tests:
     name: Torch CUDA Tests
-    runs-on: [single-gpu, nvidia-gpu, t4, ci]
+    runs-on:
+      group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
       options: --shm-size "16gb" --ipc host --gpus 0

@@ -201,7 +204,8 @@ jobs:

   onnx_cuda_tests:
     name: ONNX CUDA Tests
-    runs-on: [single-gpu, nvidia-gpu, t4, ci]
+    runs-on:
+      group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-onnxruntime-cuda
       options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0

@@ -249,7 +253,8 @@ jobs:
   run_torch_compile_tests:
     name: PyTorch Compile CUDA tests

-    runs-on: [single-gpu, nvidia-gpu, t4, ci]
+    runs-on:
+      group: aws-g4dn-2xlarge

     container:
       image: diffusers/diffusers-pytorch-compile-cuda

@@ -291,7 +296,8 @@ jobs:
   run_xformers_tests:
     name: PyTorch xformers CUDA tests

-    runs-on: [single-gpu, nvidia-gpu, t4, ci]
+    runs-on:
+      group: aws-g4dn-2xlarge

     container:
       image: diffusers/diffusers-pytorch-xformers-cuda

@@ -332,7 +338,8 @@ jobs:
   run_examples_tests:
     name: Examples PyTorch CUDA tests on Ubuntu

-    runs-on: [single-gpu, nvidia-gpu, t4, ci]
+    runs-on:
+      group: aws-g4dn-2xlarge

     container:
       image: diffusers/diffusers-pytorch-cuda
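Every hunk above makes the same change: `runs-on` moves from an array of self-hosted runner labels to a runner group. As a reference for the new shape, here is a minimal sketch of one job after the change; the group name, container image, and options are taken from the diff, while the single step is illustrative only:

```yaml
jobs:
  torch_cuda_tests:
    name: Torch CUDA Tests
    # Route the job to a named runner group instead of matching self-hosted labels
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    steps:
      # Placeholder step; the real workflow checks out the repo and runs the slow test suite here
      - name: Check GPU visibility
        run: nvidia-smi
```

The removed form (`runs-on: [single-gpu, nvidia-gpu, t4, ci]`) picks any self-hosted runner carrying all of those labels, whereas the `group:` form targets a specific runner group, which is how these jobs are routed after this change.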