Skip to content

Commit 1070e1a

Browse files
authored
[CI] Speed up slow tests (#708)
* [CI] Localize the HF cache
* pip cache
* de-env
* refactor matrix
* fix fast cache
* less onnx steps
* revert
* revert pip cache
* revert pip cache
* remove debugging trigger
1 parent b35bac4 commit 1070e1a

File tree

5 files changed

+14
-22
lines changed

5 files changed

+14
-22
lines changed

.github/workflows/pr_tests.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ jobs:
2121
runs-on: [ self-hosted, docker-gpu ]
2222
container:
2323
image: python:3.7
24-
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
24+
options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
2525

2626
steps:
2727
- name: Checkout diffusers

.github/workflows/push_tests.yml

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -15,14 +15,10 @@ env:
1515
jobs:
1616
run_tests_single_gpu:
1717
name: Diffusers tests
18-
strategy:
19-
fail-fast: false
20-
matrix:
21-
machine_type: [ single-gpu ]
22-
runs-on: [ self-hosted, docker-gpu, '${{ matrix.machine_type }}' ]
18+
runs-on: [ self-hosted, docker-gpu, single-gpu ]
2319
container:
2420
image: nvcr.io/nvidia/pytorch:22.07-py3
25-
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
21+
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache
2622

2723
steps:
2824
- name: Checkout diffusers
@@ -66,14 +62,10 @@ jobs:
6662

6763
run_examples_single_gpu:
6864
name: Examples tests
69-
strategy:
70-
fail-fast: false
71-
matrix:
72-
machine_type: [ single-gpu ]
73-
runs-on: [ self-hosted, docker-gpu, '${{ matrix.machine_type }}' ]
65+
runs-on: [ self-hosted, docker-gpu, single-gpu ]
7466
container:
7567
image: nvcr.io/nvidia/pytorch:22.07-py3
76-
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
68+
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache
7769

7870
steps:
7971
- name: Checkout diffusers

setup.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@
9292
"jaxlib>=0.1.65,<=0.3.6",
9393
"modelcards>=0.1.4",
9494
"numpy",
95-
"onnxruntime-gpu",
95+
"onnxruntime",
9696
"pytest",
9797
"pytest-timeout",
9898
"pytest-xdist",
@@ -178,7 +178,7 @@ def run(self):
178178
extras["training"] = deps_list("accelerate", "datasets", "tensorboard", "modelcards")
179179
extras["test"] = deps_list(
180180
"datasets",
181-
"onnxruntime-gpu",
181+
"onnxruntime",
182182
"pytest",
183183
"pytest-timeout",
184184
"pytest-xdist",

src/diffusers/dependency_versions_table.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
"jaxlib": "jaxlib>=0.1.65,<=0.3.6",
1818
"modelcards": "modelcards>=0.1.4",
1919
"numpy": "numpy",
20-
"onnxruntime-gpu": "onnxruntime-gpu",
20+
"onnxruntime": "onnxruntime",
2121
"pytest": "pytest",
2222
"pytest-timeout": "pytest-timeout",
2323
"pytest-xdist": "pytest-xdist",

tests/test_pipelines.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1422,18 +1422,18 @@ def test_stable_diffusion_inpaint_pipeline_k_lms(self):
14221422
@slow
14231423
def test_stable_diffusion_onnx(self):
14241424
sd_pipe = StableDiffusionOnnxPipeline.from_pretrained(
1425-
"CompVis/stable-diffusion-v1-4", revision="onnx", provider="CUDAExecutionProvider", use_auth_token=True
1425+
"CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider", use_auth_token=True
14261426
)
14271427

14281428
prompt = "A painting of a squirrel eating a burger"
14291429
np.random.seed(0)
1430-
output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=20, output_type="np")
1430+
output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=5, output_type="np")
14311431
image = output.images
14321432

14331433
image_slice = image[0, -3:, -3:, -1]
14341434

14351435
assert image.shape == (1, 512, 512, 3)
1436-
expected_slice = np.array([0.0385, 0.0252, 0.0234, 0.0287, 0.0358, 0.0287, 0.0276, 0.0235, 0.0010])
1436+
expected_slice = np.array([0.3602, 0.3688, 0.3652, 0.3895, 0.3782, 0.3747, 0.3927, 0.4241, 0.4327])
14371437
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
14381438

14391439
@slow
@@ -1592,7 +1592,7 @@ def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
15921592
assert latents.shape == (1, 4, 64, 64)
15931593
latents_slice = latents[0, -3:, -3:, -1]
15941594
expected_slice = np.array(
1595-
[-0.6254, -0.2742, -1.0710, 0.2296, -1.1683, 0.6913, -2.0605, -0.0682, 0.9700]
1595+
[-0.5950, -0.3039, -1.1672, 0.1594, -1.1572, 0.6719, -1.9712, -0.0403, 0.9592]
15961596
)
15971597
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
15981598

@@ -1606,6 +1606,6 @@ def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
16061606
prompt = "Andromeda galaxy in a bottle"
16071607

16081608
np.random.seed(0)
1609-
pipe(prompt=prompt, num_inference_steps=50, guidance_scale=7.5, callback=test_callback_fn, callback_steps=1)
1609+
pipe(prompt=prompt, num_inference_steps=5, guidance_scale=7.5, callback=test_callback_fn, callback_steps=1)
16101610
assert test_callback_fn.has_been_called
1611-
assert number_of_steps == 51
1611+
assert number_of_steps == 6

0 commit comments

Comments (0)