[CI] Speed up slow tests #708

Merged 10 commits on Oct 3, 2022
2 changes: 1 addition & 1 deletion .github/workflows/pr_tests.yml
@@ -21,7 +21,7 @@ jobs:
runs-on: [ self-hosted, docker-gpu ]
container:
image: python:3.7
- options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+ options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

steps:
- name: Checkout diffusers
16 changes: 4 additions & 12 deletions .github/workflows/push_tests.yml
@@ -15,14 +15,10 @@ env:
jobs:
run_tests_single_gpu:
name: Diffusers tests
- strategy:
- fail-fast: false
- matrix:
- machine_type: [ single-gpu ]
- runs-on: [ self-hosted, docker-gpu, '${{ matrix.machine_type }}' ]
+ runs-on: [ self-hosted, docker-gpu, single-gpu ]
container:
image: nvcr.io/nvidia/pytorch:22.07-py3
- options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+ options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache

steps:
- name: Checkout diffusers
@@ -66,14 +62,10 @@ jobs:

run_examples_single_gpu:
name: Examples tests
- strategy:
- fail-fast: false
- matrix:
- machine_type: [ single-gpu ]
- runs-on: [ self-hosted, docker-gpu, '${{ matrix.machine_type }}' ]
+ runs-on: [ self-hosted, docker-gpu, single-gpu ]
container:
image: nvcr.io/nvidia/pytorch:22.07-py3
- options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+ options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache

steps:
- name: Checkout diffusers
4 changes: 2 additions & 2 deletions setup.py
@@ -92,7 +92,7 @@
"jaxlib>=0.1.65,<=0.3.6",
"modelcards>=0.1.4",
"numpy",
"onnxruntime-gpu",
"onnxruntime",
"pytest",
"pytest-timeout",
"pytest-xdist",
@@ -178,7 +178,7 @@ def run(self):
extras["training"] = deps_list("accelerate", "datasets", "tensorboard", "modelcards")
extras["test"] = deps_list(
"datasets",
"onnxruntime-gpu",
"onnxruntime",
"pytest",
"pytest-timeout",
"pytest-xdist",
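Because the test extra now depends on the plain onnxruntime package rather than onnxruntime-gpu, the ONNX tests no longer need a CUDA-enabled runtime. A minimal sketch for checking which execution providers the installed runtime exposes, assuming onnxruntime has been installed (for example via the test extra):

import onnxruntime as ort

# The CPU build always ships CPUExecutionProvider; CUDAExecutionProvider only
# shows up when the GPU build (onnxruntime-gpu) is installed instead.
providers = ort.get_available_providers()
print(providers)
assert "CPUExecutionProvider" in providers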
2 changes: 1 addition & 1 deletion src/diffusers/dependency_versions_table.py
@@ -17,7 +17,7 @@
"jaxlib": "jaxlib>=0.1.65,<=0.3.6",
"modelcards": "modelcards>=0.1.4",
"numpy": "numpy",
"onnxruntime-gpu": "onnxruntime-gpu",
"onnxruntime": "onnxruntime",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
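The same rename appears here because setup.py resolves its extras through this table via the deps_list helper shown in the hunk above. A rough sketch of that lookup, assumed for illustration rather than copied from the repo:

# dependency_versions_table.py maps a short package name to a pip requirement
# string; deps_list pulls entries out of that mapping, which is why switching
# onnxruntime-gpu to onnxruntime has to happen in both files.
deps = {
    "onnxruntime": "onnxruntime",
    "pytest": "pytest",
}

def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]

print(deps_list("onnxruntime", "pytest"))  # ['onnxruntime', 'pytest']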
12 changes: 6 additions & 6 deletions tests/test_pipelines.py
@@ -1422,18 +1422,18 @@ def test_stable_diffusion_inpaint_pipeline_k_lms(self):
@slow
def test_stable_diffusion_onnx(self):
sd_pipe = StableDiffusionOnnxPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", provider="CUDAExecutionProvider", use_auth_token=True
"CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider", use_auth_token=True
)

prompt = "A painting of a squirrel eating a burger"
np.random.seed(0)
- output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=20, output_type="np")
+ output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=5, output_type="np")
image = output.images

image_slice = image[0, -3:, -3:, -1]

assert image.shape == (1, 512, 512, 3)
- expected_slice = np.array([0.0385, 0.0252, 0.0234, 0.0287, 0.0358, 0.0287, 0.0276, 0.0235, 0.0010])
+ expected_slice = np.array([0.3602, 0.3688, 0.3652, 0.3895, 0.3782, 0.3747, 0.3927, 0.4241, 0.4327])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

@slow
@@ -1592,7 +1592,7 @@ def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
assert latents.shape == (1, 4, 64, 64)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
- [-0.6254, -0.2742, -1.0710, 0.2296, -1.1683, 0.6913, -2.0605, -0.0682, 0.9700]
+ [-0.5950, -0.3039, -1.1672, 0.1594, -1.1572, 0.6719, -1.9712, -0.0403, 0.9592]
)
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

@@ -1606,6 +1606,6 @@ def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
prompt = "Andromeda galaxy in a bottle"

np.random.seed(0)
- pipe(prompt=prompt, num_inference_steps=50, guidance_scale=7.5, callback=test_callback_fn, callback_steps=1)
+ pipe(prompt=prompt, num_inference_steps=5, guidance_scale=7.5, callback=test_callback_fn, callback_steps=1)
assert test_callback_fn.has_been_called
- assert number_of_steps == 51
+ assert number_of_steps == 6
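Taken together, the slow ONNX test now runs on the CPU execution provider with only 5 inference steps, and the callback test likewise drops from 50 steps to 5 (per the assertions, the callback fires once more than num_inference_steps, hence 51 becoming 6). A standalone sketch of the updated ONNX call, under the same assumptions the test makes (access to CompVis/stable-diffusion-v1-4 and a diffusers build that provides StableDiffusionOnnxPipeline):

import numpy as np
from diffusers import StableDiffusionOnnxPipeline

# CPUExecutionProvider keeps the test runnable with the plain onnxruntime
# package; 5 inference steps trades image quality for CI speed.
sd_pipe = StableDiffusionOnnxPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    revision="onnx",
    provider="CPUExecutionProvider",
    use_auth_token=True,
)

prompt = "A painting of a squirrel eating a burger"
np.random.seed(0)
output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=5, output_type="np")
image = output.images

assert image.shape == (1, 512, 512, 3)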