From 0ec64fe9fc5e2ddf1019f71772f30d71520afacc Mon Sep 17 00:00:00 2001
From: Aryan
Date: Thu, 22 Aug 2024 15:17:47 +0530
Subject: [PATCH] [tests] fix broken xformers tests (#9206)

* fix xformers tests

* remove unnecessary modifications to cogvideox tests

* update
---
 .../pipelines/animatediff/test_animatediff_controlnet.py | 8 ++++++++
 .../pipelines/animatediff/test_animatediff_sparsectrl.py | 8 ++++++++
 tests/pipelines/cogvideox/test_cogvideox.py              | 4 ++++
 tests/pipelines/latte/test_latte.py                      | 8 ++++++++
 4 files changed, 28 insertions(+)

diff --git a/tests/pipelines/animatediff/test_animatediff_controlnet.py b/tests/pipelines/animatediff/test_animatediff_controlnet.py
index 72315bd0c9..3035fc1e3c 100644
--- a/tests/pipelines/animatediff/test_animatediff_controlnet.py
+++ b/tests/pipelines/animatediff/test_animatediff_controlnet.py
@@ -20,6 +20,7 @@ from diffusers import (
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import logging
+from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -329,6 +330,13 @@ class AnimateDiffControlNetPipelineFastTests(
         inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device)
         pipe(**inputs)

+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
+
     def test_free_init(self):
         components = self.get_dummy_components()
         pipe: AnimateDiffControlNetPipeline = self.pipeline_class(**components)
diff --git a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py
index 5d8a722811..e4cc06e1e7 100644
--- a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py
+++ b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py
@@ -19,6 +19,7 @@ from diffusers import (
     UNetMotionModel,
 )
 from diffusers.utils import logging
+from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -393,6 +394,13 @@ class AnimateDiffSparseControlNetPipelineFastTests(
         inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device)
         pipe(**inputs)

+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
+
     def test_free_init(self):
         components = self.get_dummy_components()
         pipe: AnimateDiffSparseControlNetPipeline = self.pipeline_class(**components)
diff --git a/tests/pipelines/cogvideox/test_cogvideox.py b/tests/pipelines/cogvideox/test_cogvideox.py
index 3ae500eb95..17d0d8f21d 100644
--- a/tests/pipelines/cogvideox/test_cogvideox.py
+++ b/tests/pipelines/cogvideox/test_cogvideox.py
@@ -275,6 +275,10 @@ class CogVideoXPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
             "VAE tiling should not affect the inference results",
         )

+    @unittest.skip("xformers attention processor does not exist for CogVideoX")
+    def test_xformers_attention_forwardGenerator_pass(self):
+        pass
+

 @slow
 @require_torch_gpu
diff --git a/tests/pipelines/latte/test_latte.py b/tests/pipelines/latte/test_latte.py
index 94ff7fc0fa..9667ebff24 100644
--- a/tests/pipelines/latte/test_latte.py
+++ b/tests/pipelines/latte/test_latte.py
@@ -28,6 +28,7 @@ from diffusers import (
     LattePipeline,
     LatteTransformer3DModel,
 )
+from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     numpy_cosine_similarity_distance,
@@ -256,6 +257,13 @@ class LattePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
         self.assertLess(max_diff, 1.0)

+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
+

 @slow
 @require_torch_gpu