mirror of https://github.com/huggingface/diffusers.git
synced 2026-01-27 17:22:53 +03:00
enable unidiffuser test cases on xpu (#11444)
* enable unidiffuser cases on XPU
  Signed-off-by: Yao Matrix <matrix.yao@intel.com>
* fix a typo
  Signed-off-by: Yao Matrix <matrix.yao@intel.com>
* fix style
  Signed-off-by: Yao Matrix <matrix.yao@intel.com>
---------
Signed-off-by: Yao Matrix <matrix.yao@intel.com>
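For context, every hunk below applies the same device-agnostic pattern from diffusers.utils.testing_utils: @require_torch_gpu becomes @require_torch_accelerator, torch.cuda.empty_cache() becomes backend_empty_cache(torch_device), and CUDA-specific names are generalized. A minimal sketch of what a test class looks like after the change (the class name here is illustrative, not part of this diff):

import gc
import unittest

from diffusers.utils.testing_utils import (
    backend_empty_cache,        # clears the cache of whichever backend torch_device points at
    require_torch_accelerator,  # skips the test unless an accelerator (CUDA, XPU, ...) is present
    torch_device,               # resolved device string, e.g. "cuda", "xpu", or "cpu"
)


@require_torch_accelerator
class ExamplePipelineSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)  # replaces torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)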
@@ -1485,8 +1485,8 @@ class PipelineTesterMixin:
         model_devices = [component.device.type for component in components.values() if hasattr(component, "device")]
         self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()
@@ -1677,11 +1677,11 @@ class PipelineTesterMixin:

         pipe.set_progress_bar_config(disable=None)

-        pipe.enable_model_cpu_offload(device=torch_device)
+        pipe.enable_model_cpu_offload()
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload = pipe(**inputs)[0]

-        pipe.enable_model_cpu_offload(device=torch_device)
+        pipe.enable_model_cpu_offload()
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload_twice = pipe(**inputs)[0]

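Both call forms appear in the hunk above; for reference, enable_model_cpu_offload accepts an optional device argument naming the accelerator that sub-models are moved onto while they run, which matters once torch_device can be "xpu" rather than "cuda". A hedged sketch (the checkpoint id is only an example):

import torch
from diffusers import DiffusionPipeline
from diffusers.utils.testing_utils import torch_device

# Example checkpoint; any diffusers pipeline exposes the same method.
pipe = DiffusionPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)

# Sub-models stay on CPU and are moved to the accelerator only for their forward pass.
# Passing device explicitly pins the onload target (e.g. "cuda" or "xpu");
# calling it with no arguments uses the method's default target instead.
pipe.enable_model_cpu_offload(device=torch_device)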
@@ -2226,7 +2226,7 @@ class PipelineTesterMixin:

         def enable_group_offload_on_component(pipe, group_offloading_kwargs):
             # We intentionally don't test VAE's here. This is because some tests enable tiling on the VAE. If
-            # tiling is enabled and a forward pass is run, when cuda streams are used, the execution order of
+            # tiling is enabled and a forward pass is run, when accelerator streams are used, the execution order of
             # the layers is not traced correctly. This causes errors. For apply group offloading to VAE, a
             # warmup forward pass (even with dummy small inputs) is recommended.
             for component_name in [
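The comment above recommends a warmup forward pass before group offloading is combined with VAE tiling and stream-based prefetching. A rough sketch of what that could look like, assuming the apply_group_offloading helper from diffusers.hooks and a toy, randomly initialized AutoencoderKL (all parameter choices here are illustrative):

import torch
from diffusers import AutoencoderKL
from diffusers.hooks import apply_group_offloading
from diffusers.utils.testing_utils import torch_device

vae = AutoencoderKL()  # toy VAE with the default config; a real test would load pretrained weights

# Offload groups of layers to CPU and stream them onto the accelerator on demand.
apply_group_offloading(
    vae,
    onload_device=torch.device(torch_device),  # assumes an accelerator is available
    offload_device=torch.device("cpu"),
    offload_type="leaf_level",
    use_stream=True,  # stream-based prefetching records layer execution order on the first pass
)

# Warmup with a small dummy latent so the execution order is traced
# before tiling is enabled and real inputs are run.
with torch.no_grad():
    dummy_latent = torch.randn(1, 4, 8, 8, device=torch_device)
    vae.decode(dummy_latent)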
@@ -22,13 +22,13 @@ from diffusers import (
     UniDiffuserTextDecoder,
 )
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     load_image,
     nightly,
     require_torch_2,
     require_torch_accelerator,
-    require_torch_gpu,
     run_test_in_subprocess,
     torch_device,
 )
@@ -577,24 +577,24 @@ class UniDiffuserPipelineFastTests(
         assert text[0][: len(expected_text_prefix)] == expected_text_prefix

     @unittest.skip(
-        "Test not supported becauseit has a bunch of direct configs at init and also, this pipeline isn't used that much now."
+        "Test not supported because it has a bunch of direct configs at init and also, this pipeline isn't used that much now."
     )
     def test_encode_prompt_works_in_isolation():
         pass


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class UniDiffuserPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, seed=0, generate_latents=False):
         generator = torch.manual_seed(seed)
@@ -705,17 +705,17 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class UniDiffuserPipelineNightlyTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, seed=0, generate_latents=False):
         generator = torch.manual_seed(seed)