Skip to content

Commit

Permalink
Change GPU Runners (#7840)
Browse files (browse the repository at this point in the history)
* Move to new GPU Runners for slow tests

* Move to new GPU Runners for nightly tests
glegendre01 authored May 2, 2024
1 parent 44ba90c commit ce97d7e
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 9 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/nightly_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ jobs:
run_nightly_tests_for_other_torch_modules:
name: Torch Non-Pipelines CUDA Nightly Tests
runs-on: docker-gpu
runs-on: [single-gpu, nvidia-gpu, t4, ci]
container:
image: diffusers/diffusers-pytorch-cuda
options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0
Expand Down Expand Up @@ -185,7 +185,7 @@ jobs:
run_lora_nightly_tests:
name: Nightly LoRA Tests with PEFT and TORCH
runs-on: docker-gpu
runs-on: [single-gpu, nvidia-gpu, t4, ci]
container:
image: diffusers/diffusers-pytorch-cuda
options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0
Expand Down Expand Up @@ -298,7 +298,7 @@ jobs:
run_nightly_onnx_tests:
name: Nightly ONNXRuntime CUDA tests on Ubuntu
runs-on: docker-gpu
runs-on: [single-gpu, nvidia-gpu, t4, ci]
container:
image: diffusers/diffusers-onnxruntime-cuda
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
Expand Down
12 changes: 6 additions & 6 deletions .github/workflows/push_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ jobs:

torch_cuda_tests:
name: Torch CUDA Tests
runs-on: docker-gpu
runs-on: [single-gpu, nvidia-gpu, t4, ci]
container:
image: diffusers/diffusers-pytorch-cuda
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
Expand Down Expand Up @@ -168,7 +168,7 @@ jobs:

peft_cuda_tests:
name: PEFT CUDA Tests
runs-on: docker-gpu
runs-on: [single-gpu, nvidia-gpu, t4, ci]
container:
image: diffusers/diffusers-pytorch-cuda
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
Expand Down Expand Up @@ -265,7 +265,7 @@ jobs:

onnx_cuda_tests:
name: ONNX CUDA Tests
runs-on: docker-gpu
runs-on: [single-gpu, nvidia-gpu, t4, ci]
container:
image: diffusers/diffusers-onnxruntime-cuda
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
Expand Down Expand Up @@ -313,7 +313,7 @@ jobs:
run_torch_compile_tests:
name: PyTorch Compile CUDA tests

runs-on: docker-gpu
runs-on: [single-gpu, nvidia-gpu, t4, ci]

container:
image: diffusers/diffusers-pytorch-compile-cuda
Expand Down Expand Up @@ -354,7 +354,7 @@ jobs:
run_xformers_tests:
name: PyTorch xformers CUDA tests

runs-on: docker-gpu
runs-on: [single-gpu, nvidia-gpu, t4, ci]

container:
image: diffusers/diffusers-pytorch-xformers-cuda
Expand Down Expand Up @@ -395,7 +395,7 @@ jobs:
run_examples_tests:
name: Examples PyTorch CUDA tests on Ubuntu

runs-on: docker-gpu
runs-on: [single-gpu, nvidia-gpu, t4, ci]

container:
image: diffusers/diffusers-pytorch-cuda
Expand Down

0 comments on commit ce97d7e

Please sign in to comment.