# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    AutoencoderTiny,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    HeunDiscreteScheduler,
    LCMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionImg2ImgPipeline,
    UNet2DConditionModel,
)

from ...testing_utils import (
    backend_empty_cache,
    backend_max_memory_allocated,
    backend_reset_max_memory_allocated,
    backend_reset_peak_memory_stats,
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    nightly,
    require_torch_accelerator,
    skip_mps,
    slow,
    torch_device,
)
from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
    TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
)
from ..test_pipelines_common import (
    IPAdapterTesterMixin,
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


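# Deterministic kernels keep the hard-coded output slices in these tests
# reproducible across runs (enable_full_determinism flips the relevant
# PyTorch/cuDNN switches in diffusers' testing utilities).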
enable_full_determinism()


class StableDiffusionImg2ImgPipelineFastTests(
    IPAdapterTesterMixin,
    PipelineLatentTesterMixin,
    PipelineKarrasSchedulerTesterMixin,
    PipelineTesterMixin,
    unittest.TestCase,
):
    pipeline_class = StableDiffusionImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS

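    # The components below form a miniature Stable Diffusion stack (32x32 UNet
    # sample size, tiny CLIP text encoder) so the fast tests can run on CPU.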
    def get_dummy_components(self, time_cond_proj_dim=None):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            time_cond_proj_dim=time_cond_proj_dim,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components

    def get_dummy_tiny_autoencoder(self):
        return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4)

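    # MPS has historically not supported device-local torch.Generator objects,
    # hence the CPU-seeded fallback in get_dummy_inputs below.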
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "np",
        }
        return inputs

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4555, 0.3216, 0.4049, 0.4620, 0.4618, 0.4126, 0.4122, 0.4629, 0.4579])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

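    # The two LCM variants below pass time_cond_proj_dim so the dummy UNet
    # accepts the guidance-scale embedding that LCM-distilled checkpoints use.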
    def test_stable_diffusion_img2img_default_case_lcm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(time_cond_proj_dim=256)
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.5709, 0.4614, 0.4587, 0.5978, 0.5298, 0.6910, 0.6240, 0.5212, 0.5454])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img2img_default_case_lcm_custom_timesteps(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(time_cond_proj_dim=256)
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        del inputs["num_inference_steps"]
        inputs["timesteps"] = [999, 499]
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.5709, 0.4614, 0.4587, 0.5978, 0.5298, 0.6910, 0.6240, 0.5212, 0.5454])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img2img_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4593, 0.3408, 0.4232, 0.4749, 0.4476, 0.4115, 0.4357, 0.4733, 0.4663])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_ip_adapter(self):
        expected_pipe_slice = None
        if torch_device == "cpu":
            expected_pipe_slice = np.array([0.4932, 0.5092, 0.5135, 0.5517, 0.5626, 0.6621, 0.6490, 0.5021, 0.5441])
        return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice)

    def test_stable_diffusion_img2img_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2
        inputs["image"] = inputs["image"].repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.4241, 0.5576, 0.5711, 0.4792, 0.4311, 0.5952, 0.5827, 0.5138, 0.5109])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img2img_k_lms(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = LMSDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4398, 0.4949, 0.4337, 0.6580, 0.5555, 0.4338, 0.5769, 0.5955, 0.5175])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img2img_tiny_autoencoder(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe.vae = self.get_dummy_tiny_autoencoder()
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.00669, 0.00669, 0.0, 0.00693, 0.00858, 0.0, 0.00567, 0.00515, 0.00125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass(expected_max_diff=5e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

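    # Interruption test strategy: a first full run records per-step latents via
    # callback_on_step_end; a second run sets `_interrupt` at a chosen step, and
    # its latent output must match what the full run recorded at that step.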
    def test_pipeline_interrupt(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = "hey"
        num_inference_steps = 3

        # store intermediate latents from the generation process
        class PipelineState:
            def __init__(self):
                self.state = []

            def apply(self, pipe, i, t, callback_kwargs):
                self.state.append(callback_kwargs["latents"])
                return callback_kwargs

        pipe_state = PipelineState()
        sd_pipe(
            prompt,
            image=inputs["image"],
            num_inference_steps=num_inference_steps,
            output_type="np",
            generator=torch.Generator("cpu").manual_seed(0),
            callback_on_step_end=pipe_state.apply,
        ).images

        # interrupt generation at step index
        interrupt_step_idx = 1

        def callback_on_step_end(pipe, i, t, callback_kwargs):
            if i == interrupt_step_idx:
                pipe._interrupt = True

            return callback_kwargs

        output_interrupted = sd_pipe(
            prompt,
            image=inputs["image"],
            num_inference_steps=num_inference_steps,
            output_type="latent",
            generator=torch.Generator("cpu").manual_seed(0),
            callback_on_step_end=callback_on_step_end,
        ).images

        # fetch intermediate latents at the interrupted step
        # from the completed generation process
        intermediate_latent = pipe_state.state[interrupt_step_idx]

        # compare the intermediate latent to the output of the interrupted process
        # they should be the same
        assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4)

    def test_encode_prompt_works_in_isolation(self):
        extra_required_param_value_dict = {
            "device": torch.device(torch_device).type,
            "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
        }
        return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)


@slow
@require_torch_accelerator
class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/sketch-mountains-input.png"
        )
        inputs = {
            "prompt": "a fantasy landscape, concept art, high resolution",
            "image": init_image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs

    def test_stable_diffusion_img2img_default(self):
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4300, 0.4662, 0.4930, 0.3990, 0.4307, 0.4525, 0.3719, 0.4064, 0.3923])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_img2img_k_lms(self):
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.0389, 0.0346, 0.0415, 0.0290, 0.0218, 0.0210, 0.0408, 0.0567, 0.0271])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_img2img_ddim(self):
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.0593, 0.0607, 0.0851, 0.0582, 0.0636, 0.0721, 0.0751, 0.0981, 0.0781])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

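    # Uses the legacy `callback`/`callback_steps` arguments (kept for backward
    # compatibility alongside `callback_on_step_end`) to inspect latents.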
    def test_stable_diffusion_img2img_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 96)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.4958, 0.5107, 1.1045, 2.7539, 4.6680, 3.8320, 1.5049, 1.8633, 2.6523])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 96)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.4956, 0.5078, 1.0918, 2.7520, 4.6484, 3.8125, 1.5146, 1.8633, 2.6367])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 2

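    # Sequential CPU offload streams submodules to the accelerator one at a
    # time, so peak accelerator memory should stay far below a full fp16 load.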
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        backend_empty_cache(torch_device)
        backend_reset_max_memory_allocated(torch_device)
        backend_reset_peak_memory_stats(torch_device)

        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload(device=torch_device)

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        _ = pipe(**inputs)

        mem_bytes = backend_max_memory_allocated(torch_device)
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pipeline_with_model_offloading(self):
        backend_empty_cache(torch_device)
        backend_reset_max_memory_allocated(torch_device)
        backend_reset_peak_memory_stats(torch_device)

        inputs = self.get_inputs(torch_device, dtype=torch.float16)

        # Normal inference

        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            safety_checker=None,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe(**inputs)
        mem_bytes = backend_max_memory_allocated(torch_device)

        # With model offloading

        # Reload but don't move to cuda
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            safety_checker=None,
            torch_dtype=torch.float16,
        )

        backend_empty_cache(torch_device)
        backend_reset_max_memory_allocated(torch_device)
        backend_reset_peak_memory_stats(torch_device)

        pipe.enable_model_cpu_offload(device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        _ = pipe(**inputs)
        mem_bytes_offloaded = backend_max_memory_allocated(torch_device)

        assert mem_bytes_offloaded < mem_bytes
        for module in pipe.text_encoder, pipe.unet, pipe.vae:
            assert module.device == torch.device("cpu")

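    # HeunDiscreteScheduler is a second-order sampler (two UNet evaluations per
    # step), so even (10) and odd (11) step counts exercise different code
    # paths; their outputs should nevertheless stay close.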
    def test_img2img_2nd_order(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
        sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 10
        inputs["strength"] = 0.75
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/img2img_heun.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 5e-2

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 11
        inputs["strength"] = 0.75
        image_other = sd_pipe(**inputs).images[0]

        mean_diff = np.abs(image - image_other).mean()

        # images should be very similar
        assert mean_diff < 5e-2

    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9393, 0.9500, 0.9399, 0.9438, 0.9458, 0.9400, 0.9455, 0.9414, 0.9423])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3

    def test_img2img_safety_checker_works(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 20
        # make sure the safety checker is activated
        inputs["prompt"] = "naked, sex, porn"
        out = sd_pipe(**inputs)

        assert out.nsfw_content_detected[0], f"Safety checker should work for prompt: {inputs['prompt']}"
        assert np.abs(out.images[0]).sum() < 1e-5  # should be all zeros


@nightly
@require_torch_accelerator
class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/sketch-mountains-input.png"
        )
        inputs = {
            "prompt": "a fantasy landscape, concept art, high resolution",
            "image": init_image,
            "generator": generator,
            "num_inference_steps": 50,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs

    def test_img2img_pndm(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_pndm.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_ddim(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
        sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_lms(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_lms.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_dpm(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
        sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 30
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_dpm.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3