From c78ee143e9d3cb52147cbdcda13707d02f96961c Mon Sep 17 00:00:00 2001
From: Dhruv Nair
Date: Thu, 28 Sep 2023 19:00:41 +0530
Subject: [PATCH] Move more slow tests to nightly (#5220)

* move to nightly

* fix mistake
---
 tests/pipelines/audio_diffusion/test_audio_diffusion.py       | 4 ++--
 tests/pipelines/audioldm/test_audioldm.py                      | 4 ++--
 .../kandinsky_v22/test_kandinsky_controlnet_img2img.py         | 4 ++--
 .../latent_diffusion/test_latent_diffusion_uncond.py           | 4 ++--
 .../stable_diffusion/test_stable_diffusion_k_diffusion.py      | 4 ++--
 .../stable_diffusion/test_stable_diffusion_model_editing.py    | 4 ++--
 .../stable_diffusion/test_stable_diffusion_paradigms.py        | 4 ++--
 tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py  | 4 ++--
 .../test_stable_diffusion_attend_and_excite.py                 | 4 ++--
 .../stable_diffusion_2/test_stable_diffusion_diffedit.py       | 3 +--
 tests/pipelines/stable_unclip/test_stable_unclip.py            | 4 ++--
 tests/pipelines/stable_unclip/test_stable_unclip_img2img.py    | 4 ++--
 tests/pipelines/text_to_video/test_text_to_video_zero.py       | 4 ++--
 tests/pipelines/text_to_video/test_video_to_video.py           | 4 ++--
 14 files changed, 27 insertions(+), 28 deletions(-)

diff --git a/tests/pipelines/audio_diffusion/test_audio_diffusion.py b/tests/pipelines/audio_diffusion/test_audio_diffusion.py
index 271e458bf5..2cf3e4a956 100644
--- a/tests/pipelines/audio_diffusion/test_audio_diffusion.py
+++ b/tests/pipelines/audio_diffusion/test_audio_diffusion.py
@@ -29,7 +29,7 @@ from diffusers import (
     UNet2DConditionModel,
     UNet2DModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device


 enable_full_determinism()
@@ -95,7 +95,7 @@ class PipelineFastTests(unittest.TestCase):
         )
         return vqvae, unet

-    @slow
+    @nightly
     def test_audio_diffusion(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         mel = Mel(
diff --git a/tests/pipelines/audioldm/test_audioldm.py b/tests/pipelines/audioldm/test_audioldm.py
index 0a2a44bf48..4b1764010f 100644
--- a/tests/pipelines/audioldm/test_audioldm.py
+++ b/tests/pipelines/audioldm/test_audioldm.py
@@ -37,7 +37,7 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device

 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -369,7 +369,7 @@ class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


-@slow
+@nightly
 class AudioLDMPipelineSlowTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py b/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py
index 0c7b995800..52ed27b465 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py
@@ -33,8 +33,8 @@ from diffusers.utils.testing_utils import (
     floats_tensor,
     load_image,
     load_numpy,
+    nightly,
     require_torch_gpu,
-    slow,
     torch_device,
 )

@@ -232,7 +232,7 @@ class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unitte
         super().test_float16_inference(expected_max_diff=2e-1)


-@slow
+@nightly
 @require_torch_gpu
 class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
index ff8670ea29..4d284a494f 100644
--- a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
+++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
@@ -20,7 +20,7 @@ import torch
 from transformers import CLIPTextConfig, CLIPTextModel

 from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device


 enable_full_determinism()
@@ -96,7 +96,7 @@ class LDMPipelineFastTests(unittest.TestCase):
         assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


-@slow
+@nightly
 @require_torch
 class LDMPipelineIntegrationTests(unittest.TestCase):
     def test_inference_uncond(self):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py
index 672c0ebfa0..e5d11e9802 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py
@@ -20,13 +20,13 @@ import numpy as np
 import torch

 from diffusers import StableDiffusionKDiffusionPipeline
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device


 enable_full_determinism()


-@slow
+@nightly
 @require_torch_gpu
 class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py
index b7ddd2fd59..27c6a65b63 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py
@@ -28,7 +28,7 @@ from diffusers import (
     StableDiffusionModelEditingPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
@@ -184,7 +184,7 @@ class StableDiffusionModelEditingPipelineFastTests(
         super().test_attention_slicing_forward_pass(expected_max_diff=5e-3)


-@slow
+@nightly
 @require_torch_gpu
 class StableDiffusionModelEditingSlowTests(unittest.TestCase):
     def tearDown(self):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_paradigms.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_paradigms.py
index 3ce476d09b..ae9bc83fe0 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_paradigms.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_paradigms.py
@@ -29,8 +29,8 @@ from diffusers import (
 )
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
+    nightly,
     require_torch_gpu,
-    slow,
     torch_device,
 )

@@ -188,7 +188,7 @@ class StableDiffusionParadigmsPipelineFastTests(PipelineLatentTesterMixin, Pipel
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


-@slow
+@nightly
 @require_torch_gpu
 class StableDiffusionParadigmsPipelineSlowTests(unittest.TestCase):
     def tearDown(self):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py
index b87d11e858..6eae1ce4d3 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py
@@ -26,7 +26,7 @@ from diffusers import (
     StableDiffusionSAGPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
@@ -115,7 +115,7 @@ class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTes
         super().test_inference_batch_single_identical(expected_max_diff=3e-3)


-@slow
+@nightly
 @require_torch_gpu
 class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
index a8f489012b..1e709873a7 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
@@ -28,10 +28,10 @@ from diffusers import (
 )
 from diffusers.utils.testing_utils import (
     load_numpy,
+    nightly,
     numpy_cosine_similarity_distance,
     require_torch_gpu,
     skip_mps,
-    slow,
 )

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -187,7 +187,7 @@ class StableDiffusionAttendAndExcitePipelineFastTests(


 @require_torch_gpu
-@slow
+@nightly
 class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
     # Attend and excite requires being able to run a backward pass at
     # inference time. There's no deterministic backward operator for pad
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
index abcb5e7ead..3a31913717 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
@@ -39,7 +39,6 @@ from diffusers.utils.testing_utils import (
     nightly,
     numpy_cosine_similarity_distance,
     require_torch_gpu,
-    slow,
     torch_device,
 )

@@ -292,7 +291,7 @@ class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, Pipeli


 @require_torch_gpu
-@slow
+@nightly
 class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
diff --git a/tests/pipelines/stable_unclip/test_stable_unclip.py b/tests/pipelines/stable_unclip/test_stable_unclip.py
index f7affbe997..f05edf6861 100644
--- a/tests/pipelines/stable_unclip/test_stable_unclip.py
+++ b/tests/pipelines/stable_unclip/test_stable_unclip.py
@@ -13,7 +13,7 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
-from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -185,7 +185,7 @@ class StableUnCLIPPipelineFastTests(
         self._test_inference_batch_single_identical(expected_max_diff=1e-3)


-@slow
+@nightly
 @require_torch_gpu
 class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
diff --git a/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py b/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
index 9bbde46e4d..12f6a91017 100644
--- a/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
+++ b/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
@@ -22,9 +22,9 @@ from diffusers.utils.testing_utils import (
     floats_tensor,
     load_image,
     load_numpy,
+    nightly,
     require_torch_gpu,
     skip_mps,
-    slow,
     torch_device,
 )

@@ -206,7 +206,7 @@ class StableUnCLIPImg2ImgPipelineFastTests(
         self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


-@slow
+@nightly
 @require_torch_gpu
 class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
diff --git a/tests/pipelines/text_to_video/test_text_to_video_zero.py b/tests/pipelines/text_to_video/test_text_to_video_zero.py
index 02fb43a0b6..c4627c8869 100644
--- a/tests/pipelines/text_to_video/test_text_to_video_zero.py
+++ b/tests/pipelines/text_to_video/test_text_to_video_zero.py
@@ -18,12 +18,12 @@ import unittest
 import torch

 from diffusers import DDIMScheduler, TextToVideoZeroPipeline
-from diffusers.utils.testing_utils import load_pt, require_torch_gpu, slow
+from diffusers.utils.testing_utils import load_pt, nightly, require_torch_gpu

 from ..test_pipelines_common import assert_mean_pixel_difference


-@slow
+@nightly
 @require_torch_gpu
 class TextToVideoZeroPipelineSlowTests(unittest.TestCase):
     def test_full_model(self):
diff --git a/tests/pipelines/text_to_video/test_video_to_video.py b/tests/pipelines/text_to_video/test_video_to_video.py
index c505652806..b5fe345177 100644
--- a/tests/pipelines/text_to_video/test_video_to_video.py
+++ b/tests/pipelines/text_to_video/test_video_to_video.py
@@ -31,9 +31,9 @@ from diffusers.utils.testing_utils import (
     enable_full_determinism,
     floats_tensor,
     is_flaky,
+    nightly,
     numpy_cosine_similarity_distance,
     skip_mps,
-    slow,
     torch_device,
 )

@@ -195,7 +195,7 @@ class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         return super().test_progress_bar()


-@slow
+@nightly
 @skip_mps
 class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
     def test_two_step_model(self):
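
A note on what this change does at runtime: the @slow and @nightly markers in diffusers.utils.testing_utils are thin wrappers around unittest.skipUnless, gated by environment flags, so moving a suite from @slow to @nightly only changes which CI job actually executes it. The sketch below illustrates that gating; it assumes the RUN_SLOW / RUN_NIGHTLY flag names, and the _flag_from_env helper is illustrative rather than the library's own code.

    import os
    import unittest


    def _flag_from_env(name, default=False):
        # Illustrative helper (assumption): treat "1"/"true"/"yes" (any case) as enabled.
        value = os.environ.get(name)
        if value is None:
            return default
        return value.lower() in ("1", "true", "yes")


    _run_slow_tests = _flag_from_env("RUN_SLOW")
    _run_nightly_tests = _flag_from_env("RUN_NIGHTLY")


    def slow(test_case):
        # Skipped unless RUN_SLOW=1, i.e. only the per-commit slow CI job runs it.
        return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


    def nightly(test_case):
        # Skipped unless RUN_NIGHTLY=1, i.e. only the scheduled nightly CI job runs it.
        return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)

Under that assumption, the suites touched here can still be exercised locally with, for example, RUN_NIGHTLY=1 python -m pytest tests/pipelines/audioldm/test_audioldm.py, while the per-commit RUN_SLOW job now reports them as skipped.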