diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index 280bffc2f6f9..a2f63705b035 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -1158,11 +1158,10 @@ def test_stable_diffusion_img2img_pipeline(self):
         model_id = "CompVis/stable-diffusion-v1-4"
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             model_id,
-            revision="fp16",  # fp16 to infer 768x512 images with 16GB of VRAM
-            torch_dtype=torch.float16,
             use_auth_token=True,
         )
         pipe.to(torch_device)
+        pipe.enable_attention_slicing()
         pipe.set_progress_bar_config(disable=None)

         prompt = "A fantasy landscape, trending on artstation"
@@ -1204,10 +1203,9 @@ def test_stable_diffusion_img2img_pipeline_k_lms(self):
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             model_id,
             scheduler=lms,
-            revision="fp16",  # fp16 to infer 768x512 images with 16GB of VRAM
-            torch_dtype=torch.float16,
             use_auth_token=True,
         )
+        pipe.enable_attention_slicing()
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -1252,11 +1250,10 @@ def test_stable_diffusion_inpaint_pipeline(self):
         model_id = "CompVis/stable-diffusion-v1-4"
         pipe = StableDiffusionInpaintPipeline.from_pretrained(
             model_id,
-            revision="fp16",  # fp16 to infer 768x512 images in 16GB of VRAM
-            torch_dtype=torch.float16,
             use_auth_token=True,
         )
         pipe.to(torch_device)
+        pipe.enable_attention_slicing()
         pipe.set_progress_bar_config(disable=None)

         prompt = "A red cat sitting on a parking bench"