From c3726153fda803ddbb204650d088d9d8111ccdcb Mon Sep 17 00:00:00 2001
From: Yao Matrix
Date: Mon, 12 May 2025 19:21:37 +0800
Subject: [PATCH] enable several pipeline integration tests on XPU (#11526)

* enable kandinsky2_2 integration test cases on XPU

Signed-off-by: Yao Matrix

* fix style

Signed-off-by: Yao Matrix

* enable latent_diffusion, dance_diffusion, musicldm, shap_e integration uts on xpu

Signed-off-by: Yao Matrix

* fix style

Signed-off-by: Yao Matrix

---------

Signed-off-by: Yao Matrix
Co-authored-by: Aryan
---
 .../dance_diffusion/test_dance_diffusion.py   | 15 +++++++++++----
 .../kandinsky2_2/test_kandinsky_controlnet.py | 10 ++++++----
 .../test_kandinsky_controlnet_img2img.py      | 12 +++++++-----
 .../latent_diffusion/test_latent_diffusion.py | 15 ++++++++-------
 tests/pipelines/musicldm/test_musicldm.py     | 14 ++++++++++----
 tests/pipelines/shap_e/test_shap_e.py         | 14 ++++++++++----
 tests/pipelines/shap_e/test_shap_e_img2img.py |  9 +++++----
 7 files changed, 57 insertions(+), 32 deletions(-)

diff --git a/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/tests/pipelines/dance_diffusion/test_dance_diffusion.py
index 1f60c0b421..881946e6a0 100644
--- a/tests/pipelines/dance_diffusion/test_dance_diffusion.py
+++ b/tests/pipelines/dance_diffusion/test_dance_diffusion.py
@@ -20,7 +20,14 @@ import numpy as np
 import torch
 
 from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device
+from diffusers.utils.testing_utils import (
+    backend_empty_cache,
+    enable_full_determinism,
+    nightly,
+    require_torch_accelerator,
+    skip_mps,
+    torch_device,
+)
 
 from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -116,19 +123,19 @@ class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class PipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def test_dance_diffusion(self):
         device = torch_device
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
index 10a95d6177..6454152b7a 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
@@ -28,13 +28,15 @@ from diffusers import (
     VQModel,
 )
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     load_image,
     load_numpy,
     nightly,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
+    torch_device,
 )
 
 from ..test_pipelines_common import PipelineTesterMixin
@@ -226,19 +228,19 @@ class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.Test
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def test_kandinsky_controlnet(self):
         expected_image = load_numpy(
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
index 58fbbecc05..c99b7b738a 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
@@ -29,13 +29,15 @@ from diffusers import (
     VQModel,
 )
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     load_image,
     load_numpy,
     nightly,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
+    torch_device,
 )
 
 from ..test_pipelines_common import PipelineTesterMixin
@@ -233,19 +235,19 @@ class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unitte
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def test_kandinsky_controlnet_img2img(self):
         expected_image = load_numpy(
@@ -309,4 +311,4 @@ class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
         assert image.shape == (512, 512, 3)
 
         max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten())
-        assert max_diff < 1e-4
+        assert max_diff < 5e-4
diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion.py b/tests/pipelines/latent_diffusion/test_latent_diffusion.py
index e751240e43..245116d5fa 100644
--- a/tests/pipelines/latent_diffusion/test_latent_diffusion.py
+++ b/tests/pipelines/latent_diffusion/test_latent_diffusion.py
@@ -22,10 +22,11 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     load_numpy,
     nightly,
-    require_torch_gpu,
+    require_torch_accelerator,
     torch_device,
 )
 
@@ -136,17 +137,17 @@ class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class LDMTextToImagePipelineSlowTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def get_inputs(self, device, dtype=torch.float32, seed=0):
         generator = torch.manual_seed(seed)
@@ -177,17 +178,17 @@ class LDMTextToImagePipelineSlowTests(unittest.TestCase):
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def get_inputs(self, device, dtype=torch.float32, seed=0):
         generator = torch.manual_seed(seed)
diff --git a/tests/pipelines/musicldm/test_musicldm.py b/tests/pipelines/musicldm/test_musicldm.py
index bdd536b6ff..7f553e919c 100644
--- a/tests/pipelines/musicldm/test_musicldm.py
+++ b/tests/pipelines/musicldm/test_musicldm.py
@@ -39,7 +39,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import (
+    backend_empty_cache,
+    enable_full_determinism,
+    nightly,
+    require_torch_accelerator,
+    torch_device,
+)
 
 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -408,17 +414,17 @@ class MusicLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class MusicLDMPipelineNightlyTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
         generator = torch.Generator(device=generator_device).manual_seed(seed)
diff --git a/tests/pipelines/shap_e/test_shap_e.py b/tests/pipelines/shap_e/test_shap_e.py
index 6cf643fe47..638de7e8cc 100644
--- a/tests/pipelines/shap_e/test_shap_e.py
+++ b/tests/pipelines/shap_e/test_shap_e.py
@@ -21,7 +21,13 @@ from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokeni
 
 from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
 from diffusers.pipelines.shap_e import ShapERenderer
-from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import (
+    backend_empty_cache,
+    load_numpy,
+    nightly,
+    require_torch_accelerator,
+    torch_device,
+)
 
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
 
@@ -222,19 +228,19 @@ class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class ShapEPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def test_shap_e(self):
         expected_image = load_numpy(
diff --git a/tests/pipelines/shap_e/test_shap_e_img2img.py b/tests/pipelines/shap_e/test_shap_e_img2img.py
index 72eee3e35e..ed0a4d47b6 100644
--- a/tests/pipelines/shap_e/test_shap_e_img2img.py
+++ b/tests/pipelines/shap_e/test_shap_e_img2img.py
@@ -23,11 +23,12 @@ from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
 from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
 from diffusers.pipelines.shap_e import ShapERenderer
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     floats_tensor,
     load_image,
     load_numpy,
     nightly,
-    require_torch_gpu,
+    require_torch_accelerator,
     torch_device,
 )
 
@@ -250,19 +251,19 @@ class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def test_shap_e_img2img(self):
         input_image = load_image(
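
Note (outside the patch itself): every hunk above applies the same device-agnostic pattern, so a single sketch captures it. The snippet below is a minimal illustration, assuming the nightly, require_torch_accelerator, backend_empty_cache, and torch_device helpers from diffusers.utils.testing_utils behave as they are used in the diff; the class name ExamplePipelineIntegrationTests is hypothetical and stands in for any of the integration test classes touched here.

import gc
import unittest

from diffusers.utils.testing_utils import (
    backend_empty_cache,
    nightly,
    require_torch_accelerator,
    torch_device,
)


@nightly
@require_torch_accelerator  # gates the test on any supported accelerator (CUDA, XPU, ...), not CUDA only
class ExamplePipelineIntegrationTests(unittest.TestCase):  # hypothetical name, for illustration only
    def setUp(self):
        # clean up the accelerator memory before each test
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)  # dispatches cache clearing to whichever backend torch_device resolves to

    def tearDown(self):
        # clean up the accelerator memory after each test
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)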