From c3478a42b94048cd9dbe46fde84c4858f7e7cccf Mon Sep 17 00:00:00 2001 From: hlky Date: Mon, 13 Jan 2025 13:54:06 +0000 Subject: [PATCH] Fix Nightly AudioLDM2PipelineFastTests (#10556) * Fix Nightly AudioLDM2PipelineFastTests * add phonemizer to setup extras test * fix * make style --- setup.py | 2 ++ src/diffusers/dependency_versions_table.py | 1 + .../pipelines/audioldm2/pipeline_audioldm2.py | 18 +++++++++++++++--- tests/pipelines/audioldm2/test_audioldm2.py | 4 ++-- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index 35ce34920f..d696c14ca8 100644 --- a/setup.py +++ b/setup.py @@ -135,6 +135,7 @@ _deps = [ "transformers>=4.41.2", "urllib3<=2.0.0", "black", + "phonemizer", ] # this is a lookup table with items like: @@ -227,6 +228,7 @@ extras["test"] = deps_list( "scipy", "torchvision", "transformers", + "phonemizer", ) extras["torch"] = deps_list("torch", "accelerate") diff --git a/src/diffusers/dependency_versions_table.py b/src/diffusers/dependency_versions_table.py index 9e7bf242ec..bb5a54f734 100644 --- a/src/diffusers/dependency_versions_table.py +++ b/src/diffusers/dependency_versions_table.py @@ -43,4 +43,5 @@ deps = { "transformers": "transformers>=4.41.2", "urllib3": "urllib3<=2.0.0", "black": "black", + "phonemizer": "phonemizer", } diff --git a/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py b/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py index 63a8b702f5..b8b5d07af5 100644 --- a/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py +++ b/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py @@ -237,7 +237,7 @@ class AudioLDM2Pipeline(DiffusionPipeline): """ self.vae.disable_slicing() - def enable_model_cpu_offload(self, gpu_id=0): + def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. 
Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` @@ -249,11 +249,23 @@ class AudioLDM2Pipeline(DiffusionPipeline): else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") - device = torch.device(f"cuda:{gpu_id}") + torch_device = torch.device(device) + device_index = torch_device.index + + if gpu_id is not None and device_index is not None: + raise ValueError( + f"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device} " + f"Cannot pass both. Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}" + ) + + device_type = torch_device.type + device = torch.device(f"{device_type}:{gpu_id or torch_device.index}") if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + device_mod = getattr(torch, device.type, None) + if hasattr(device_mod, "empty_cache") and device_mod.is_available(): + device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist) model_sequence = [ self.text_encoder.text_model, diff --git a/tests/pipelines/audioldm2/test_audioldm2.py b/tests/pipelines/audioldm2/test_audioldm2.py index fb550dd321..bf3ce2542d 100644 --- a/tests/pipelines/audioldm2/test_audioldm2.py +++ b/tests/pipelines/audioldm2/test_audioldm2.py @@ -469,8 +469,8 @@ class AudioLDM2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pass def test_dict_tuple_outputs_equivalent(self): - # increase tolerance from 1e-4 -> 2e-4 to account for large composite model - super().test_dict_tuple_outputs_equivalent(expected_max_difference=2e-4) + # increase tolerance from 1e-4 -> 3e-4 to account for large composite model + super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-4) def 
test_inference_batch_single_identical(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model