diff --git a/tests/pipelines/animatediff/test_animatediff_controlnet.py b/tests/pipelines/animatediff/test_animatediff_controlnet.py
index 72315bd0c965..3035fc1e3c61 100644
--- a/tests/pipelines/animatediff/test_animatediff_controlnet.py
+++ b/tests/pipelines/animatediff/test_animatediff_controlnet.py
@@ -20,6 +20,7 @@
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import logging
+from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -329,6 +330,13 @@ def test_prompt_embeds(self):
         inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device)
         pipe(**inputs)
 
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
+
     def test_free_init(self):
         components = self.get_dummy_components()
         pipe: AnimateDiffControlNetPipeline = self.pipeline_class(**components)
diff --git a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py
index 5d8a7228118d..e4cc06e1e797 100644
--- a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py
+++ b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py
@@ -19,6 +19,7 @@
     UNetMotionModel,
 )
 from diffusers.utils import logging
+from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -393,6 +394,13 @@ def test_prompt_embeds(self):
         inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device)
         pipe(**inputs)
 
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
+
     def test_free_init(self):
         components = self.get_dummy_components()
         pipe: AnimateDiffSparseControlNetPipeline = self.pipeline_class(**components)
diff --git a/tests/pipelines/cogvideox/test_cogvideox.py b/tests/pipelines/cogvideox/test_cogvideox.py
index 3ae500eb9567..17d0d8f21d5c 100644
--- a/tests/pipelines/cogvideox/test_cogvideox.py
+++ b/tests/pipelines/cogvideox/test_cogvideox.py
@@ -275,6 +275,10 @@ def test_vae_tiling(self, expected_diff_max: float = 0.2):
             "VAE tiling should not affect the inference results",
         )
 
+    @unittest.skip("xformers attention processor does not exist for CogVideoX")
+    def test_xformers_attention_forwardGenerator_pass(self):
+        pass
+
 
 @slow
 @require_torch_gpu
diff --git a/tests/pipelines/latte/test_latte.py b/tests/pipelines/latte/test_latte.py
index 94ff7fc0faf9..9667ebff249d 100644
--- a/tests/pipelines/latte/test_latte.py
+++ b/tests/pipelines/latte/test_latte.py
@@ -28,6 +28,7 @@
     LattePipeline,
     LatteTransformer3DModel,
 )
+from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     numpy_cosine_similarity_distance,
@@ -256,6 +257,13 @@ def test_save_load_optional_components(self):
         max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
         self.assertLess(max_diff, 1.0)
 
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
+
 
 @slow
 @require_torch_gpu