From d36cf41b83e0c7fa1c40a3bc25c134c016a7e7a7 Mon Sep 17 00:00:00 2001 From: anton-l Date: Mon, 21 Nov 2022 13:40:10 +0100 Subject: [PATCH] refactor text2img --- .../pipeline_versatile_diffusion.py | 0 ...eline_versatile_diffusion_image_to_text.py | 412 +++++++++ ...ine_versatile_diffusion_image_variation.py | 1 - ...eline_versatile_diffusion_text_to_image.py | 859 ++++++++---------- .../dummy_torch_and_transformers_objects.py | 30 + .../test_versatile_diffusion_text_to_image.py | 8 +- 6 files changed, 806 insertions(+), 504 deletions(-) create mode 100644 src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py create mode 100644 src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_to_text.py diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_to_text.py b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_to_text.py new file mode 100644 index 0000000000..801fe2d39b --- /dev/null +++ b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_to_text.py @@ -0,0 +1,412 @@ +from typing import Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...modeling_utils import ModelMixin +from ...models.embeddings import TimestepEmbedding, Timesteps +from ...models.unet_2d_condition import UNet2DConditionOutput +from ...utils import logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class UNetMultiDimConditionModel(ModelMixin, ConfigMixin): + r""" + UNetMultiDimConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep + and returns sample shaped 
output. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library + implements for all the models (such as downloading or saving, etc.) + + Parameters: + sample_size (`int`, *optional*): The size of the input sample. + in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. + center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. + flip_sin_to_cos (`bool`, *optional*, defaults to `True`): + Whether to flip the sin to cos in the time embedding. + freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. + down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): + The tuple of downsample blocks to use. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`): + The tuple of upsample blocks to use. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. + downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. + mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. + act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. + norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. + norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. + cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. 
+ attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. + """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + sample_size: Optional[int] = None, + in_channels: int = 4, + out_channels: int = 4, + center_input_sample: bool = False, + flip_sin_to_cos: bool = True, + freq_shift: int = 0, + down_block_types: Tuple[str] = ( + "CrossAttnDownBlockMultiDim", + "CrossAttnDownBlockMultiDim", + "CrossAttnDownBlockMultiDim", + "DownBlockMultiDim", + ), + up_block_types: Tuple[str] = ( + "UpBlockMultiDim", + "CrossAttnUpBlockMultiDim", + "CrossAttnUpBlockMultiDim", + "CrossAttnUpBlockMultiDim", + ), + block_out_channels: Tuple[int] = (320, 640, 1280, 1280), + block_second_dim: Tuple[int] = (4, 4, 4, 4), + layers_per_block: int = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + act_fn: str = "silu", + norm_num_groups: int = 32, + norm_eps: float = 1e-5, + cross_attention_dim: int = 1280, + attention_head_dim: int = 8, + ): + super().__init__() + + self.sample_size = sample_size + time_embed_dim = block_out_channels[0] * 4 + + # input + self.conv_in = LinearMultiDim([in_channels, 1, 1], block_out_channels[0], kernel_size=3, padding=(1, 1)) + + # time + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + + self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) + + self.down_blocks = nn.ModuleList([]) + self.mid_block = None + self.up_blocks = nn.ModuleList([]) + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = self.get_down_block( + down_block_type, + num_layers=layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + 
add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=attention_head_dim, + downsample_padding=downsample_padding, + ) + self.down_blocks.append(down_block) + + # mid + self.mid_block = UNetMidBlockMultiDimCrossAttn( + in_channels=block_out_channels[-1], + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift="default", + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=attention_head_dim, + resnet_groups=norm_num_groups, + ) + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = self.get_up_block( + up_block_type, + num_layers=layers_per_block + 1, + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=time_embed_dim, + add_upsample=add_upsample, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=attention_head_dim, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) + self.conv_act = nn.SiLU() + self.conv_out = 
nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1) + + def get_down_block( + down_block_type, + num_layers, + in_channels, + out_channels, + temb_channels, + add_downsample, + resnet_eps, + resnet_act_fn, + attn_num_head_channels, + resnet_groups=None, + cross_attention_dim=None, + downsample_padding=None, + ): + down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type + if down_block_type == "DownBlockMultiDim": + return DownBlockMultiDim( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + ) + elif down_block_type == "CrossAttnDownBlockMultiDim": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") + return CrossAttnDownBlockMultiDim( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=attn_num_head_channels, + ) + + def set_attention_slice(self, slice_size): + if slice_size is not None and self.config.attention_head_dim % slice_size != 0: + raise ValueError( + f"Make sure slice_size {slice_size} is a divisor of " + f"the number of heads used in cross_attention {self.config.attention_head_dim}" + ) + if slice_size is not None and slice_size > self.config.attention_head_dim: + raise ValueError( + f"Chunk_size {slice_size} has to be smaller or equal to " + f"the number of heads used in cross_attention {self.config.attention_head_dim}" + ) + + for block in self.down_blocks: + if hasattr(block, "attentions") and block.attentions is not None: + 
block.set_attention_slice(slice_size) + + self.mid_block.set_attention_slice(slice_size) + + for block in self.up_blocks: + if hasattr(block, "attentions") and block.attentions is not None: + block.set_attention_slice(slice_size) + + def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool): + for block in self.down_blocks: + if hasattr(block, "attentions") and block.attentions is not None: + block.set_use_memory_efficient_attention_xformers(use_memory_efficient_attention_xformers) + + self.mid_block.set_use_memory_efficient_attention_xformers(use_memory_efficient_attention_xformers) + + for block in self.up_blocks: + if hasattr(block, "attentions") and block.attentions is not None: + block.set_use_memory_efficient_attention_xformers(use_memory_efficient_attention_xformers) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance( + module, (CrossAttnDownBlockMultiDim, DownBlockMultiDim, CrossAttnUpBlockMultiDim, UpBlockMultiDim) + ): + module.gradient_checkpointing = value + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + return_dict: bool = True, + ) -> Union[UNet2DConditionOutput, Tuple]: + r""" + Args: + sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor + timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps + encoder_hidden_states (`torch.FloatTensor`): (batch, channel, height, width) encoder hidden states + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. + + Returns: + [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: + [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. 
+ """ + # By default samples have to be AT least a multiple of the overall upsampling factor. + # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). + # However, the upsampling interpolation output size can be forced to fit any upsampling size + # on the fly if necessary. + default_overall_up_factor = 2**self.num_upsamplers + + # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` + forward_upsample_size = False + upsample_size = None + + if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): + logger.info("Forward upsample size to force interpolation output size.") + forward_upsample_size = True + + # 0. center input if necessary + if self.config.center_input_sample: + sample = 2 * sample - 1.0 + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) + elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + + # timesteps does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=self.dtype) + emb = self.time_embedding(t_emb) + + # 2. pre-process + sample = self.conv_in(sample) + + # 3. 
down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "attentions") and downsample_block.attentions is not None: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + + down_block_res_samples += res_samples + + # 4. mid + sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states) + + # 5. up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "attentions") and upsample_block.attentions is not None: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + upsample_size=upsample_size, + ) + else: + sample = upsample_block( + hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size + ) + # 6. 
post-process + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if not return_dict: + return (sample,) + + return UNet2DConditionOutput(sample=sample) + + +class LinearMultiDim(nn.Linear): + def __init__(self, in_features, out_features, *args, **kwargs): + in_features = [in_features] if isinstance(in_features, int) else list(in_features) + out_features = [out_features] if isinstance(out_features, int) else list(out_features) + self.in_features_multidim = in_features + self.out_features_multidim = out_features + super().__init__(np.array(in_features).prod(), np.array(out_features).prod(), *args, **kwargs) + + def forward(self, x): + shape = x.shape + n = len(self.in_features_multidim) + x = x.view(*shape[0:-n], self.in_features) + y = super().forward(x) + y = y.view(*shape[0:-n], *self.out_features_multidim) + return y + + +class DownBlockMultiDim(nn.Module): + pass + + +class UNetMidBlockMultiDimCrossAttn(nn.Module): + pass + + +class DownBlockMultiDim(nn.Module): + pass + + +class CrossAttnDownBlockMultiDim(nn.Module): + pass + + +class UpBlockMultiDim(nn.Module): + pass + + +class CrossAttnUpBlockMultiDim(nn.Module): + pass diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py index 6c2c8fb77c..24c2df835d 100644 --- a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +++ b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py @@ -17,7 +17,6 @@ from typing import List, Optional, Tuple, Union import numpy as np import torch -import torch.nn as nn import torch.utils.checkpoint import PIL diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py index 
3f833a6ba4..ed2a67bbe3 100644 --- a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +++ b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py @@ -13,19 +13,22 @@ # limitations under the License. import inspect -from typing import List, Optional, Tuple, Union +import logging +from typing import Callable, List, Optional, Union -import numpy as np import torch import torch.utils.checkpoint -import PIL from transformers import CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModel from ...models import AutoencoderKL, UNet2DConditionModel, VQModel from ...models.attention import Transformer2DModel from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils import is_accelerate_available, logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): @@ -75,6 +78,8 @@ class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): vae=vae, scheduler=scheduler, ) + + def swap_unet_attention_blocks(self): for name, module in self.image_unet.named_modules(): if isinstance(module, Transformer2DModel): parent_name, index = name.rsplit(".", 1) @@ -84,15 +89,107 @@ class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): self.image_unet.get_submodule(parent_name)[index], ) - def _encode_prompt(self, prompt, do_classifier_free_guidance): + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_xformers_memory_efficient_attention with unet->image_unet + def enable_xformers_memory_efficient_attention(self): + r""" + Enable memory efficient attention as implemented in xformers. + + When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference + time. Speed up at training time is not guaranteed. 
+ + Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention + is used. + """ + self.image_unet.set_use_memory_efficient_attention_xformers(True) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_xformers_memory_efficient_attention with unet->image_unet + def disable_xformers_memory_efficient_attention(self): + r""" + Disable memory efficient attention as implemented in xformers. + """ + self.image_unet.set_use_memory_efficient_attention_xformers(False) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_attention_slicing with unet->image_unet + def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): + r""" + Enable sliced attention computation. + + When this option is enabled, the attention module will split the input tensor in slices, to compute attention + in several steps. This is useful to save some memory in exchange for a small speed decrease. + + Args: + slice_size (`str` or `int`, *optional*, defaults to `"auto"`): + When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If + a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, + `attention_head_dim` must be a multiple of `slice_size`. + """ + if slice_size == "auto": + # half the attention head size is usually a good trade-off between + # speed and memory + slice_size = self.image_unet.config.attention_head_dim // 2 + self.image_unet.set_attention_slice(slice_size) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_attention_slicing + def disable_attention_slicing(self): + r""" + Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go + back to computing attention in one step. 
+ """ + # set slice_size = `None` to disable `attention slicing` + self.enable_attention_slicing(None) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + """ + if is_accelerate_available(): + from accelerate import cpu_offload + else: + raise ImportError("Please install accelerate via `pip install accelerate`") + + device = torch.device(f"cuda:{gpu_id}") + + for cpu_offloaded_model in [self.image_unet, self.text_unet, self.text_encoder, self.vae]: + if cpu_offloaded_model is not None: + cpu_offload(cpu_offloaded_model, device) + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device with unet->image_unet + def _execution_device(self): + r""" + Returns the device on which the pipeline's models will be executed. After calling + `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module + hooks. + """ + if self.device != torch.device("meta") or not hasattr(self.image_unet, "_hf_hook"): + return self.device + for module in self.image_unet.modules(): + if ( + hasattr(module, "_hf_hook") + and hasattr(module._hf_hook, "execution_device") + and module._hf_hook.execution_device is not None + ): + return torch.device(module._hf_hook.execution_device) + return self.device + + def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): r""" Encodes the prompt into text encoder hidden states. 
Args: prompt (`str` or `list(int)`): prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). """ def normalize_embeddings(encoder_output): @@ -103,155 +200,294 @@ class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): batch_size = len(prompt) if isinstance(prompt, list) else 1 - if do_classifier_free_guidance: - uncond_input = self.tokenizer([""] * batch_size, padding="max_length", max_length=77, return_tensors="pt") - uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device)) - uncond_embeddings = normalize_embeddings(uncond_embeddings) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids - # get prompt text embeddings - text_input = self.tokenizer(prompt, padding="max_length", max_length=77, return_tensors="pt") - text_embeddings = self.text_encoder(text_input.input_ids.to(self.device)) + if not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + 
text_embeddings = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) text_embeddings = normalize_embeddings(text_embeddings) - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = text_embeddings.shape + text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) + text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + uncond_embeddings = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + uncond_embeddings = normalize_embeddings(uncond_embeddings) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = uncond_embeddings.shape[1] + uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) + uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings - def _encode_image_prompt(self, prompt, do_classifier_free_guidance): - r""" - Encodes the image prompt into image encoder hidden states. 
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + latents = 1 / 0.18215 * latents + image = self.vae.decode(latents).sample + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image - Args: - prompt (`str` or `list(int)`): - prompt to be encoded - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - """ + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] - def normalize_embeddings(encoder_output): - embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state) - embeds = self.image_encoder.visual_projection(embeds) - embeds_pooled = embeds[:, 0:1] - embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True) - return embeds + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta - batch_size = len(prompt) if isinstance(prompt, list) else 1 + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs - if do_classifier_free_guidance: - dummy_images = [np.zeros((512, 512, 3))] * batch_size - dummy_images = 
self.image_processor(images=dummy_images, return_tensors="pt") - uncond_embeddings = self.image_encoder(dummy_images.pixel_values.to(self.device)) - uncond_embeddings = normalize_embeddings(uncond_embeddings) - - # get prompt text embeddings - image_input = self.image_processor(images=prompt, return_tensors="pt") - image_embeddings = self.image_encoder(image_input.pixel_values.to(self.device)) - image_embeddings = normalize_embeddings(image_embeddings) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and image embeddings into a single batch - # to avoid doing two forward passes - image_embeddings = torch.cat([uncond_embeddings, image_embeddings]) - - return image_embeddings - - @torch.no_grad() - def __call__( - self, - prompt: Optional[Union[str, List[str]]] = None, - height: Optional[int] = 512, - width: Optional[int] = 512, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 1.0, - eta: Optional[float] = 0.0, - generator: Optional[torch.Generator] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - **kwargs, - ) -> Union[Tuple, ImagePipelineOutput]: - r""" - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - height (`int`, *optional*, defaults to 256): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to 256): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 1.0): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt` at - the, usually at the expense of lower image quality. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*): - Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple. - - Returns: - [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if - `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the - generated images. - """ - do_classifier_free_guidance = guidance_scale > 1.0 + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs + def check_inputs(self, prompt, height, width, callback_steps): + if not isinstance(prompt, str) and not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // 8, width // 8) + if latents is None: + if device.type == "mps": + # randn does not work reproducibly on mps + latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device) + else: + latents = torch.randn(shape, generator=generator, device=device, dtype=dtype) else: - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) - condition_embeddings = self._encode_prompt(prompt, do_classifier_free_guidance) + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents - latents = torch.randn( - (batch_size, self.image_unet.in_channels, height // 8, width // 8), generator=generator, device=self.device + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: int = 512, + width: int = 512, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[torch.Generator] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. 
+ height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation + deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. 
Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_embeddings = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) - self.scheduler.set_timesteps(num_inference_steps) + # 4. 
Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + # 5. Prepare latent variables + num_channels_latents = self.image_unet.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + text_embeddings.dtype, + device, + generator, + latents, + ) - extra_kwargs = {} - if accepts_eta: - extra_kwargs["eta"] = eta + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - for t in self.progress_bar(self.scheduler.timesteps): - if not do_classifier_free_guidance: - latents_input = latents - else: - latents_input = torch.cat([latents] * 2) + # 7. Swap the attention blocks between the image and text UNets + self.swap_unet_attention_blocks() + + # 8. 
Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual - noise_pred = self.image_unet(latents_input, t, encoder_hidden_states=condition_embeddings).sample + noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample + # perform guidance - if guidance_scale != 1.0: - noise_pred_uncond, noise_prediction_cond = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_cond - noise_pred_uncond) + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - # scale and decode the image latents with vae - latents = 1 / 0.18215 * latents - image = self.vae.decode(latents).sample + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).numpy() + # 9. Swap the attention blocks backs in case the UNets are reused in another pipeline + self.swap_unet_attention_blocks() + + # 10. Post-processing + image = self.decode_latents(latents) + + # 11. 
Convert to PIL if output_type == "pil": image = self.numpy_to_pil(image) @@ -259,378 +495,3 @@ class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): return (image,) return ImagePipelineOutput(images=image) - - -# class UNetMultiDimConditionModel(ModelMixin, ConfigMixin): -# r""" -# UNet2DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep -# and returns sample shaped output. -# -# This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library -# implements for all the models (such as downloading or saving, etc.) -# -# Parameters: -# sample_size (`int`, *optional*): The size of the input sample. -# in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. -# out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. -# center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. -# flip_sin_to_cos (`bool`, *optional*, defaults to `True`): -# Whether to flip the sin to cos in the time embedding. -# freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. -# down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): -# The tuple of downsample blocks to use. -# up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`): -# The tuple of upsample blocks to use. -# block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): -# The tuple of output channels for each block. -# layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. -# downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. 
-# mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. -# act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. -# norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. -# norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. -# cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. -# attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. -# """ -# -# _supports_gradient_checkpointing = True -# -# @register_to_config -# def __init__( -# self, -# sample_size: Optional[int] = None, -# in_channels: int = 4, -# out_channels: int = 4, -# center_input_sample: bool = False, -# flip_sin_to_cos: bool = True, -# freq_shift: int = 0, -# down_block_types: Tuple[str] = ( -# "CrossAttnDownBlockMultiDim", -# "CrossAttnDownBlockMultiDim", -# "CrossAttnDownBlockMultiDim", -# "DownBlockMultiDim", -# ), -# up_block_types: Tuple[str] = ( -# "UpBlockMultiDim", -# "CrossAttnUpBlockMultiDim", -# "CrossAttnUpBlockMultiDim", -# "CrossAttnUpBlockMultiDim" -# ), -# block_out_channels: Tuple[int] = (320, 640, 1280, 1280), -# block_second_dim: Tuple[int] = (4, 4, 4, 4), -# layers_per_block: int = 2, -# downsample_padding: int = 1, -# mid_block_scale_factor: float = 1, -# act_fn: str = "silu", -# norm_num_groups: int = 32, -# norm_eps: float = 1e-5, -# cross_attention_dim: int = 1280, -# attention_head_dim: int = 8, -# ): -# super().__init__() -# -# self.sample_size = sample_size -# time_embed_dim = block_out_channels[0] * 4 -# -# # input -# self.conv_in = LinearMultiDim([in_channels, 1, 1], block_out_channels[0], kernel_size=3, padding=(1, 1)) -# -# # time -# self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) -# timestep_input_dim = block_out_channels[0] -# -# self.time_embedding = 
TimestepEmbedding(timestep_input_dim, time_embed_dim) -# -# self.down_blocks = nn.ModuleList([]) -# self.mid_block = None -# self.up_blocks = nn.ModuleList([]) -# -# # down -# output_channel = block_out_channels[0] -# for i, down_block_type in enumerate(down_block_types): -# input_channel = output_channel -# output_channel = block_out_channels[i] -# is_final_block = i == len(block_out_channels) - 1 -# -# down_block = self.get_down_block( -# down_block_type, -# num_layers=layers_per_block, -# in_channels=input_channel, -# out_channels=output_channel, -# temb_channels=time_embed_dim, -# add_downsample=not is_final_block, -# resnet_eps=norm_eps, -# resnet_act_fn=act_fn, -# resnet_groups=norm_num_groups, -# cross_attention_dim=cross_attention_dim, -# attn_num_head_channels=attention_head_dim, -# downsample_padding=downsample_padding, -# ) -# self.down_blocks.append(down_block) -# -# # mid -# self.mid_block = UNetMidBlockMultiDimCrossAttn( -# in_channels=block_out_channels[-1], -# temb_channels=time_embed_dim, -# resnet_eps=norm_eps, -# resnet_act_fn=act_fn, -# output_scale_factor=mid_block_scale_factor, -# resnet_time_scale_shift="default", -# cross_attention_dim=cross_attention_dim, -# attn_num_head_channels=attention_head_dim, -# resnet_groups=norm_num_groups, -# ) -# -# # count how many layers upsample the images -# self.num_upsamplers = 0 -# -# # up -# reversed_block_out_channels = list(reversed(block_out_channels)) -# output_channel = reversed_block_out_channels[0] -# for i, up_block_type in enumerate(up_block_types): -# is_final_block = i == len(block_out_channels) - 1 -# -# prev_output_channel = output_channel -# output_channel = reversed_block_out_channels[i] -# input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] -# -# # add upsample block for all BUT final layer -# if not is_final_block: -# add_upsample = True -# self.num_upsamplers += 1 -# else: -# add_upsample = False -# -# up_block = self.get_up_block( -# up_block_type, -# 
num_layers=layers_per_block + 1, -# in_channels=input_channel, -# out_channels=output_channel, -# prev_output_channel=prev_output_channel, -# temb_channels=time_embed_dim, -# add_upsample=add_upsample, -# resnet_eps=norm_eps, -# resnet_act_fn=act_fn, -# resnet_groups=norm_num_groups, -# cross_attention_dim=cross_attention_dim, -# attn_num_head_channels=attention_head_dim, -# ) -# self.up_blocks.append(up_block) -# prev_output_channel = output_channel -# -# # out -# self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) -# self.conv_act = nn.SiLU() -# self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1) -# -# def get_down_block( -# down_block_type, -# num_layers, -# in_channels, -# out_channels, -# temb_channels, -# add_downsample, -# resnet_eps, -# resnet_act_fn, -# attn_num_head_channels, -# resnet_groups=None, -# cross_attention_dim=None, -# downsample_padding=None, -# ): -# down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type -# if down_block_type == "DownBlockMultiDim": -# return DownBlockMultiDim( -# num_layers=num_layers, -# in_channels=in_channels, -# out_channels=out_channels, -# temb_channels=temb_channels, -# add_downsample=add_downsample, -# resnet_eps=resnet_eps, -# resnet_act_fn=resnet_act_fn, -# resnet_groups=resnet_groups, -# downsample_padding=downsample_padding, -# ) -# elif down_block_type == "CrossAttnDownBlockMultiDim": -# if cross_attention_dim is None: -# raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") -# return CrossAttnDownBlockMultiDim( -# num_layers=num_layers, -# in_channels=in_channels, -# out_channels=out_channels, -# temb_channels=temb_channels, -# add_downsample=add_downsample, -# resnet_eps=resnet_eps, -# resnet_act_fn=resnet_act_fn, -# resnet_groups=resnet_groups, -# downsample_padding=downsample_padding, -# cross_attention_dim=cross_attention_dim, -# 
attn_num_head_channels=attn_num_head_channels, -# ) -# -# def set_attention_slice(self, slice_size): -# if slice_size is not None and self.config.attention_head_dim % slice_size != 0: -# raise ValueError( -# f"Make sure slice_size {slice_size} is a divisor of " -# f"the number of heads used in cross_attention {self.config.attention_head_dim}" -# ) -# if slice_size is not None and slice_size > self.config.attention_head_dim: -# raise ValueError( -# f"Chunk_size {slice_size} has to be smaller or equal to " -# f"the number of heads used in cross_attention {self.config.attention_head_dim}" -# ) -# -# for block in self.down_blocks: -# if hasattr(block, "attentions") and block.attentions is not None: -# block.set_attention_slice(slice_size) -# -# self.mid_block.set_attention_slice(slice_size) -# -# for block in self.up_blocks: -# if hasattr(block, "attentions") and block.attentions is not None: -# block.set_attention_slice(slice_size) -# -# def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool): -# for block in self.down_blocks: -# if hasattr(block, "attentions") and block.attentions is not None: -# block.set_use_memory_efficient_attention_xformers(use_memory_efficient_attention_xformers) -# -# self.mid_block.set_use_memory_efficient_attention_xformers(use_memory_efficient_attention_xformers) -# -# for block in self.up_blocks: -# if hasattr(block, "attentions") and block.attentions is not None: -# block.set_use_memory_efficient_attention_xformers(use_memory_efficient_attention_xformers) -# -# def _set_gradient_checkpointing(self, module, value=False): -# if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D, CrossAttnUpBlock2D, UpBlock2D)): -# module.gradient_checkpointing = value -# -# def forward( -# self, -# sample: torch.FloatTensor, -# timestep: Union[torch.Tensor, float, int], -# encoder_hidden_states: torch.Tensor, -# return_dict: bool = True, -# ) -> Union[UNet2DConditionOutput, Tuple]: -# r""" -# Args: -# sample 
(`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor -# timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps -# encoder_hidden_states (`torch.FloatTensor`): (batch, channel, height, width) encoder hidden states -# return_dict (`bool`, *optional*, defaults to `True`): -# Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. -# -# Returns: -# [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: -# [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When -# returning a tuple, the first element is the sample tensor. -# """ -# # By default samples have to be AT least a multiple of the overall upsampling factor. -# # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). -# # However, the upsampling interpolation output size can be forced to fit any upsampling size -# # on the fly if necessary. -# default_overall_up_factor = 2**self.num_upsamplers -# -# # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` -# forward_upsample_size = False -# upsample_size = None -# -# if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): -# logger.info("Forward upsample size to force interpolation output size.") -# forward_upsample_size = True -# -# # 0. center input if necessary -# if self.config.center_input_sample: -# sample = 2 * sample - 1.0 -# -# # 1. time -# timesteps = timestep -# if not torch.is_tensor(timesteps): -# # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can -# timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) -# elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: -# timesteps = timesteps[None].to(sample.device) -# -# # broadcast to batch dimension in a way that's compatible with ONNX/Core ML -# timesteps = timesteps.expand(sample.shape[0]) -# -# t_emb = self.time_proj(timesteps) -# -# # timesteps does not contain any weights and will always return f32 tensors -# # but time_embedding might actually be running in fp16. so we need to cast here. -# # there might be better ways to encapsulate this. -# t_emb = t_emb.to(dtype=self.dtype) -# emb = self.time_embedding(t_emb) -# -# # 2. pre-process -# sample = self.conv_in(sample) -# -# # 3. down -# down_block_res_samples = (sample,) -# for downsample_block in self.down_blocks: -# if hasattr(downsample_block, "attentions") and downsample_block.attentions is not None: -# sample, res_samples = downsample_block( -# hidden_states=sample, -# temb=emb, -# encoder_hidden_states=encoder_hidden_states, -# ) -# else: -# sample, res_samples = downsample_block(hidden_states=sample, temb=emb) -# -# down_block_res_samples += res_samples -# -# # 4. mid -# sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states) -# -# # 5. 
up -# for i, upsample_block in enumerate(self.up_blocks): -# is_final_block = i == len(self.up_blocks) - 1 -# -# res_samples = down_block_res_samples[-len(upsample_block.resnets) :] -# down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] -# -# # if we have not reached the final block and need to forward the -# # upsample size, we do it here -# if not is_final_block and forward_upsample_size: -# upsample_size = down_block_res_samples[-1].shape[2:] -# -# if hasattr(upsample_block, "attentions") and upsample_block.attentions is not None: -# sample = upsample_block( -# hidden_states=sample, -# temb=emb, -# res_hidden_states_tuple=res_samples, -# encoder_hidden_states=encoder_hidden_states, -# upsample_size=upsample_size, -# ) -# else: -# sample = upsample_block( -# hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size -# ) -# # 6. post-process -# sample = self.conv_norm_out(sample) -# sample = self.conv_act(sample) -# sample = self.conv_out(sample) -# -# if not return_dict: -# return (sample,) -# -# return UNet2DConditionOutput(sample=sample) -# -# -# class LinearMultiDim(nn.Linear): -# def __init__(self, in_features, out_features, *args, **kwargs): -# in_features = [in_features] if isinstance(in_features, int) else list(in_features) -# out_features = [out_features] if isinstance(out_features, int) else list(out_features) -# self.in_features_multidim = in_features -# self.out_features_multidim = out_features -# super().__init__( -# np.array(in_features).prod(), -# np.array(out_features).prod(), -# *args, **kwargs) -# -# def forward(self, x): -# shape = x.shape -# n = len(self.in_features_multidim) -# x = x.view(*shape[0:-n], self.in_features) -# y = super().forward(x) -# y = y.view(*shape[0:-n], *self.out_features_multidim) -# return y diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 92c163ba74..2ad0ead440 
100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -124,6 +124,36 @@ class StableDiffusionPipeline(metaclass=DummyObject): requires_backends(cls, ["torch", "transformers"]) +class VersatileDiffusionImageVariationPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VersatileDiffusionTextToImagePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class VQDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py index 08a31366e1..3ba275df76 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py @@ -19,7 +19,7 @@ import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline -from diffusers.utils.testing_utils import load_image, require_torch, slow, torch_device +from diffusers.utils.testing_utils import require_torch_gpu, slow, torch_device from ...test_pipelines_common import PipelineTesterMixin @@ -32,7 +32,7 @@ class 
VersatileDiffusionTextToImagePipelineFastTests(PipelineTesterMixin, unitte @slow -@require_torch +@require_torch_gpu class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase): def test_inference_text2img(self): pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("diffusers/vd-official-test") @@ -45,8 +45,8 @@ class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase): prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy" ).images - image_slice = image[0, -3:, -3:, -1] + image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.9256, 0.9340, 0.8933, 0.9361, 0.9113, 0.8727, 0.9122, 0.8745, 0.8099]) + expected_slice = np.array([0.0657, 0.0529, 0.0455, 0.0802, 0.0570, 0.0179, 0.0267, 0.0483, 0.0769]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2