From 89689a54ef6f8529d2d80e6115fb5920847b67c3 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 8 Sep 2022 14:39:32 +0000
Subject: [PATCH 1/2] [Tests] Correct image folder tests

---
 tests/test_pipelines.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index 280bffc2f6f9..c4f453e0aa65 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -1163,6 +1163,7 @@ def test_stable_diffusion_img2img_pipeline(self):
             use_auth_token=True,
         )
         pipe.to(torch_device)
+        pipe.enable_attention_slicing()
         pipe.set_progress_bar_config(disable=None)
 
         prompt = "A fantasy landscape, trending on artstation"
@@ -1204,10 +1205,9 @@ def test_stable_diffusion_img2img_pipeline_k_lms(self):
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             model_id,
             scheduler=lms,
-            revision="fp16",  # fp16 to infer 768x512 images with 16GB of VRAM
-            torch_dtype=torch.float16,
             use_auth_token=True,
         )
+        pipe.enable_attention_slicing()
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
 
@@ -1252,11 +1252,10 @@ def test_stable_diffusion_inpaint_pipeline(self):
         model_id = "CompVis/stable-diffusion-v1-4"
         pipe = StableDiffusionInpaintPipeline.from_pretrained(
             model_id,
-            revision="fp16",  # fp16 to infer 768x512 images in 16GB of VRAM
-            torch_dtype=torch.float16,
             use_auth_token=True,
         )
         pipe.to(torch_device)
+        pipe.enable_attention_slicing()
         pipe.set_progress_bar_config(disable=None)
 
         prompt = "A red cat sitting on a parking bench"

From 637132b404100deaca1c274789e0264f3318b79d Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 8 Sep 2022 14:40:34 +0000
Subject: [PATCH 2/2] up

---
 tests/test_pipelines.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index c4f453e0aa65..a2f63705b035 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -1158,8 +1158,6 @@ def test_stable_diffusion_img2img_pipeline(self):
         model_id = "CompVis/stable-diffusion-v1-4"
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             model_id,
-            revision="fp16",  # fp16 to infer 768x512 images with 16GB of VRAM
-            torch_dtype=torch.float16,
             use_auth_token=True,
         )
         pipe.to(torch_device)
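
Note (not part of the patches): the pattern both commits converge on is to load the pipeline in full precision and rely on attention slicing, rather than an fp16 revision, to keep peak VRAM low. A minimal standalone sketch of that setup, assuming a CUDA device and a valid Hugging Face auth token:

    from diffusers import StableDiffusionImg2ImgPipeline

    model_id = "CompVis/stable-diffusion-v1-4"
    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
        model_id,
        use_auth_token=True,  # gated model; requires an accepted license and a valid token
    )
    pipe.to("cuda")
    pipe.enable_attention_slicing()  # compute attention in slices to reduce peak memory
    pipe.set_progress_bar_config(disable=None)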