diff --git a/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py b/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py
index 1b9c791361..7f080bf8de 100644
--- a/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py
+++ b/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py
@@ -99,9 +99,9 @@ class SDXLModularTests:
 
         assert image.shape == expected_image_shape
 
-        assert np.abs(image_slice.flatten() - expected_slice).max() < expected_max_diff, (
-            "Image Slice does not match expected slice"
-        )
+        assert (
+            np.abs(image_slice.flatten() - expected_slice).max() < expected_max_diff
+        ), "Image Slice does not match expected slice"
 
 
 class SDXLModularIPAdapterTests:
@@ -114,20 +114,20 @@ class SDXLModularIPAdapterTests:
         parameters = blocks.input_names
 
         assert issubclass(self.pipeline_class, ModularIPAdapterMixin)
-        assert "ip_adapter_image" in parameters, (
-            "`ip_adapter_image` argument must be supported by the `__call__` method"
-        )
+        assert (
+            "ip_adapter_image" in parameters
+        ), "`ip_adapter_image` argument must be supported by the `__call__` method"
         assert "ip_adapter" in blocks.sub_blocks, "pipeline must contain an IPAdapter block"
 
         _ = blocks.sub_blocks.pop("ip_adapter")
         parameters = blocks.input_names
         intermediate_parameters = blocks.intermediate_input_names
-        assert "ip_adapter_image" not in parameters, (
-            "`ip_adapter_image` argument must be removed from the `__call__` method"
-        )
-        assert "ip_adapter_image_embeds" not in intermediate_parameters, (
-            "`ip_adapter_image_embeds` argument must be supported by the `__call__` method"
-        )
+        assert (
+            "ip_adapter_image" not in parameters
+        ), "`ip_adapter_image` argument must be removed from the `__call__` method"
+        assert (
+            "ip_adapter_image_embeds" not in intermediate_parameters
+        ), "`ip_adapter_image_embeds` argument must be supported by the `__call__` method"
 
     def _get_dummy_image_embeds(self, cross_attention_dim: int = 32):
         return torch.randn((1, 1, cross_attention_dim), device=torch_device)
@@ -203,9 +203,9 @@ class SDXLModularIPAdapterTests:
         max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max()
         max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max()
 
-        assert max_diff_without_adapter_scale < expected_max_diff, (
-            "Output without ip-adapter must be same as normal inference"
-        )
+        assert (
+            max_diff_without_adapter_scale < expected_max_diff
+        ), "Output without ip-adapter must be same as normal inference"
         assert max_diff_with_adapter_scale > 1e-2, "Output with ip-adapter must be different from normal inference"
 
         # 2. Multi IP-Adapter test cases
@@ -235,12 +235,12 @@ class SDXLModularIPAdapterTests:
             output_without_multi_adapter_scale - output_without_adapter
         ).max()
         max_diff_with_multi_adapter_scale = np.abs(output_with_multi_adapter_scale - output_without_adapter).max()
-        assert max_diff_without_multi_adapter_scale < expected_max_diff, (
-            "Output without multi-ip-adapter must be same as normal inference"
-        )
-        assert max_diff_with_multi_adapter_scale > 1e-2, (
-            "Output with multi-ip-adapter scale must be different from normal inference"
-        )
+        assert (
+            max_diff_without_multi_adapter_scale < expected_max_diff
+        ), "Output without multi-ip-adapter must be same as normal inference"
+        assert (
+            max_diff_with_multi_adapter_scale > 1e-2
+        ), "Output with multi-ip-adapter scale must be different from normal inference"
 
 
 class SDXLModularControlNetTests:
@@ -253,9 +253,9 @@ class SDXLModularControlNetTests:
         parameters = blocks.input_names
 
         assert "control_image" in parameters, "`control_image` argument must be supported by the `__call__` method"
-        assert "controlnet_conditioning_scale" in parameters, (
-            "`controlnet_conditioning_scale` argument must be supported by the `__call__` method"
-        )
+        assert (
+            "controlnet_conditioning_scale" in parameters
+        ), "`controlnet_conditioning_scale` argument must be supported by the `__call__` method"
 
     def _modify_inputs_for_controlnet_test(self, inputs: Dict[str, Any]):
         controlnet_embedder_scale_factor = 2
@@ -301,9 +301,9 @@ class SDXLModularControlNetTests:
         max_diff_without_controlnet_scale = np.abs(output_without_controlnet_scale - output_without_controlnet).max()
         max_diff_with_controlnet_scale = np.abs(output_with_controlnet_scale - output_without_controlnet).max()
 
-        assert max_diff_without_controlnet_scale < expected_max_diff, (
-            "Output without controlnet must be same as normal inference"
-        )
+        assert (
+            max_diff_without_controlnet_scale < expected_max_diff
+        ), "Output without controlnet must be same as normal inference"
         assert max_diff_with_controlnet_scale > 1e-2, "Output with controlnet must be different from normal inference"
 
     def test_controlnet_cfg(self):
@@ -383,26 +383,6 @@ class SDXLModularPipelineFastTests(
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=3e-3)
 
-    @require_torch_accelerator
-    def test_stable_diffusion_xl_offloads(self):
-        pipes = []
-        sd_pipe = self.get_pipeline().to(torch_device)
-        pipes.append(sd_pipe)
-
-        cm = ComponentsManager()
-        cm.enable_auto_cpu_offload(device=torch_device)
-        sd_pipe = self.get_pipeline(components_manager=cm)
-        pipes.append(sd_pipe)
-
-        image_slices = []
-        for pipe in pipes:
-            inputs = self.get_dummy_inputs(torch_device)
-            image = pipe(**inputs, output="images")
-
-            image_slices.append(image[0, -3:, -3:, -1].flatten())
-
-        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
-
     def test_stable_diffusion_xl_save_from_pretrained(self):
         pipes = []
         sd_pipe = self.get_pipeline().to(torch_device)
diff --git a/tests/modular_pipelines/test_modular_pipelines_common.py b/tests/modular_pipelines/test_modular_pipelines_common.py
index 5a8227f5ce..684ba3c583 100644
--- a/tests/modular_pipelines/test_modular_pipelines_common.py
+++ b/tests/modular_pipelines/test_modular_pipelines_common.py
@@ -320,11 +320,40 @@ class ModularPipelineTesterMixin:
         assert images.shape[0] == batch_size * num_images_per_prompt
 
     @require_accelerator
-    def test_components_auto_cpu_offload(self):
+    def test_components_auto_cpu_offload_inference_consistent(self):
         base_pipe = self.get_pipeline().to(torch_device)
-        for component in base_pipe.components:
-            assert component.device == torch_device
 
         cm = ComponentsManager()
         cm.enable_auto_cpu_offload(device=torch_device)
         offload_pipe = self.get_pipeline(components_manager=cm)
+
+        image_slices = []
+        for pipe in [base_pipe, offload_pipe]:
+            inputs = self.get_dummy_inputs(torch_device)
+            image = pipe(**inputs, output="images")
+
+            image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+
+    def test_save_from_pretrained(self):
+        pipes = []
+        base_pipe = self.get_pipeline().to(torch_device)
+        pipes.append(base_pipe)
+
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            base_pipe.save_pretrained(tmpdirname)
+            pipe = ModularPipeline.from_pretrained(tmpdirname).to(torch_device)
+            pipe.load_default_components(torch_dtype=torch.float32)
+            pipe.to(torch_device)
+
+        pipes.append(pipe)
+
+        image_slices = []
+        for pipe in pipes:
+            inputs = self.get_dummy_inputs(torch_device)
+            image = pipe(**inputs, output="images")
+
+            image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3