diff --git a/tests/pipelines/controlnet/test_controlnet_sdxl.py b/tests/pipelines/controlnet/test_controlnet_sdxl.py
index f91208c56f..c31c728c5b 100644
--- a/tests/pipelines/controlnet/test_controlnet_sdxl.py
+++ b/tests/pipelines/controlnet/test_controlnet_sdxl.py
@@ -827,7 +827,7 @@ class ControlNetSDXLPipelineSlowTests(unittest.TestCase):
         assert np.allclose(original_image, expected_image, atol=1e-04)
 
     def test_download_ckpt_diff_format_is_same(self):
-        controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-depth-sdxl-1.0")
+        controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16)
         single_file_url = (
             "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
         )
@@ -844,7 +844,7 @@ class ControlNetSDXLPipelineSlowTests(unittest.TestCase):
             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png"
         )
         single_file_images = pipe_single_file(
-            prompt, image=image, generator=generator, output_type="np", num_inference_steps=3
+            prompt, image=image, generator=generator, output_type="np", num_inference_steps=2
         ).images
 
         generator = torch.Generator(device="cpu").manual_seed(0)
@@ -853,13 +853,13 @@ class ControlNetSDXLPipelineSlowTests(unittest.TestCase):
         )
         pipe.unet.set_default_attn_processor()
         pipe.enable_model_cpu_offload()
-        images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images
+        images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=2).images
 
         assert images[0].shape == (512, 512, 3)
         assert single_file_images[0].shape == (512, 512, 3)
 
         max_diff = numpy_cosine_similarity_distance(images[0].flatten(), single_file_images[0].flatten())
-        assert max_diff < 1e-4
+        assert max_diff < 5e-2
 
 
 class StableDiffusionSSD1BControlNetPipelineFastTests(StableDiffusionXLControlNetPipelineFastTests):
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
index d1920d59b4..b500612116 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
@@ -697,3 +697,40 @@ class AdapterSDXLPipelineSlowTests(unittest.TestCase):
         image_slice = images[0, -3:, -3:, -1].flatten()
         expected_slice = np.array([0.4284, 0.4337, 0.4319, 0.4255, 0.4329, 0.4280, 0.4338, 0.4420, 0.4226])
         assert numpy_cosine_similarity_distance(image_slice, expected_slice) < 1e-4
+
+    def test_download_ckpt_diff_format_is_same(self):
+        ckpt_path = (
+            "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
+        )
+        adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16)
+        prompt = "toy"
+        image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png"
+        )
+        pipe_single_file = StableDiffusionXLAdapterPipeline.from_single_file(
+            ckpt_path,
+            adapter=adapter,
+            torch_dtype=torch.float16,
+        )
+        pipe_single_file.enable_model_cpu_offload()
+        pipe_single_file.set_progress_bar_config(disable=None)
+
+        generator = torch.Generator(device="cpu").manual_seed(0)
+        images_single_file = pipe_single_file(
+            prompt, image=image, generator=generator, output_type="np", num_inference_steps=3
+        ).images
+
+        generator = torch.Generator(device="cpu").manual_seed(0)
+        pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
+            "stabilityai/stable-diffusion-xl-base-1.0",
+            adapter=adapter,
+            torch_dtype=torch.float16,
+        )
+        pipe.enable_model_cpu_offload()
+        images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images
+
+        assert images_single_file[0].shape == (768, 512, 3)
+        assert images[0].shape == (768, 512, 3)
+
+        max_diff = numpy_cosine_similarity_distance(images[0].flatten(), images_single_file[0].flatten())
+        assert max_diff < 5e-3