From 7993be9e7f1b8b61a434b8168f91bf330cff970d Mon Sep 17 00:00:00 2001 From: galbria <158810732+galbria@users.noreply.github.com> Date: Wed, 20 Aug 2025 12:27:39 +0300 Subject: [PATCH 01/74] Bria 3 2 pipeline (#12010) * Add Bria model and pipeline to diffusers - Introduced `BriaTransformer2DModel` and `BriaPipeline` for enhanced image generation capabilities. - Updated import structures across various modules to include the new Bria components. - Added utility functions and output classes specific to the Bria pipeline. - Implemented tests for the Bria pipeline to ensure functionality and output integrity. * with working tests * style and quality pass * adding docs * add to overview * fixes from "make fix-copies" * Refactor transformer_bria.py and pipeline_bria.py: Introduce new EmbedND class for rotary position embedding, and enhance Timestep and TimestepProjEmbeddings classes. Add utility functions for handling negative prompts and generating original sigmas in pipeline_bria.py. * remove redundent and duplicates tests and fix bf16 slow test * style fixes * small doc update * Enhance Bria 3.2 documentation and implementation - Updated the GitHub repository link for Bria 3.2. - Added usage instructions for the gated model access. - Introduced the BriaTransformerBlock and BriaAttention classes to the model architecture. - Refactored existing classes to integrate Bria-specific components, including BriaEmbedND and BriaPipeline. - Updated the pipeline output class to reflect Bria-specific functionality. - Adjusted test cases to align with the new Bria model structure. * Refactor Bria model components and update documentation - Removed outdated inference example from Bria 3.2 documentation. - Introduced the BriaTransformerBlock class to enhance model architecture. - Updated attention handling to use `attention_kwargs` instead of `joint_attention_kwargs`. - Improved import structure in the Bria pipeline to handle optional dependencies. - Adjusted test cases to reflect changes in model dtype assertions. * Update Bria model reference in documentation to reflect new file naming convention * Update docs/source/en/_toctree.yml * Refactor BriaPipeline to inherit from DiffusionPipeline instead of FluxPipeline, updating imports accordingly. 
* move the __call__ func to the end of file * Update BriaPipeline example to use bfloat16 for precision sensitivity for better result * make style && make quality && make fix-copiessource --------- Co-authored-by: Linoy Tsaban <57615435+linoytsaban@users.noreply.github.com> Co-authored-by: Aryan --- docs/source/en/_toctree.yml | 4 + docs/source/en/api/models/bria_transformer.md | 19 + docs/source/en/api/pipelines/bria_3_2.md | 44 ++ docs/source/en/api/pipelines/overview.md | 1 + src/diffusers/__init__.py | 4 + src/diffusers/hooks/_helpers.py | 8 + src/diffusers/models/__init__.py | 2 + src/diffusers/models/transformers/__init__.py | 1 + .../models/transformers/transformer_bria.py | 719 +++++++++++++++++ src/diffusers/pipelines/__init__.py | 2 + src/diffusers/pipelines/bria/__init__.py | 48 ++ src/diffusers/pipelines/bria/pipeline_bria.py | 729 ++++++++++++++++++ .../pipelines/bria/pipeline_output.py | 21 + src/diffusers/utils/dummy_pt_objects.py | 15 + .../dummy_torch_and_transformers_objects.py | 15 + .../test_models_transformer_bria.py | 181 +++++ tests/pipelines/bria/__init__.py | 0 tests/pipelines/bria/test_pipeline_bria.py | 318 ++++++++ 18 files changed, 2131 insertions(+) create mode 100644 docs/source/en/api/models/bria_transformer.md create mode 100644 docs/source/en/api/pipelines/bria_3_2.md create mode 100644 src/diffusers/models/transformers/transformer_bria.py create mode 100644 src/diffusers/pipelines/bria/__init__.py create mode 100644 src/diffusers/pipelines/bria/pipeline_bria.py create mode 100644 src/diffusers/pipelines/bria/pipeline_output.py create mode 100644 tests/models/transformers/test_models_transformer_bria.py create mode 100644 tests/pipelines/bria/__init__.py create mode 100644 tests/pipelines/bria/test_pipeline_bria.py diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 6916035201..dd0193a3a8 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -340,6 +340,8 @@ title: AllegroTransformer3DModel - local: api/models/aura_flow_transformer2d title: AuraFlowTransformer2DModel + - local: api/models/bria_transformer + title: BriaTransformer2DModel - local: api/models/chroma_transformer title: ChromaTransformer2DModel - local: api/models/cogvideox_transformer3d @@ -468,6 +470,8 @@ title: AutoPipeline - local: api/pipelines/blip_diffusion title: BLIP-Diffusion + - local: api/pipelines/bria_3_2 + title: Bria 3.2 - local: api/pipelines/chroma title: Chroma - local: api/pipelines/cogvideox diff --git a/docs/source/en/api/models/bria_transformer.md b/docs/source/en/api/models/bria_transformer.md new file mode 100644 index 0000000000..9df7eeb6ff --- /dev/null +++ b/docs/source/en/api/models/bria_transformer.md @@ -0,0 +1,19 @@ + + +# BriaTransformer2DModel + +A modified flux Transformer model from [Bria](https://huggingface.co/briaai/BRIA-3.2) + +## BriaTransformer2DModel + +[[autodoc]] BriaTransformer2DModel diff --git a/docs/source/en/api/pipelines/bria_3_2.md b/docs/source/en/api/pipelines/bria_3_2.md new file mode 100644 index 0000000000..059fa01f9f --- /dev/null +++ b/docs/source/en/api/pipelines/bria_3_2.md @@ -0,0 +1,44 @@ + + +# Bria 3.2 + +Bria 3.2 is the next-generation commercial-ready text-to-image model. With just 4 billion parameters, it provides exceptional aesthetics and text rendering, evaluated to provide on par results to leading open-source models, and outperforming other licensed models. 
+In addition to being built entirely on licensed data, 3.2 provides several advantages for enterprise and commercial use: + +- Efficient Compute: the model is 3x smaller than equivalent models on the market (4B parameters vs. 12B parameters for other open-source models) +- Architecture Consistency: Same architecture as 3.1, ideal for users looking to upgrade without disruption. +- Fine-tuning Speedup: 2x faster fine-tuning on L40S and A100. + +Original model checkpoints for Bria 3.2 can be found [here](https://huggingface.co/briaai/BRIA-3.2). +The GitHub repo for Bria 3.2 can be found [here](https://github.com/Bria-AI/BRIA-3.2). + +If you want to learn more about the Bria platform and get free trial access, please visit [bria.ai](https://bria.ai). + + +## Usage + +_As the model is gated, before using it with diffusers you first need to go to the [Bria 3.2 Hugging Face page](https://huggingface.co/briaai/BRIA-3.2), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate._ + +Use the command below to log in: + +```bash +hf auth login +``` + + +## BriaPipeline + +[[autodoc]] BriaPipeline + - all + - __call__ + diff --git a/docs/source/en/api/pipelines/overview.md b/docs/source/en/api/pipelines/overview.md index 4e7a4e5e8d..f34262d37c 100644 --- a/docs/source/en/api/pipelines/overview.md +++ b/docs/source/en/api/pipelines/overview.md @@ -37,6 +37,7 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an | [AudioLDM2](audioldm2) | text2audio | | [AuraFlow](auraflow) | text2image | | [BLIP Diffusion](blip_diffusion) | text2image | +| [Bria 3.2](bria_3_2) | text2image | | [CogVideoX](cogvideox) | text2video | | [Consistency Models](consistency_models) | unconditional image generation | | [ControlNet](controlnet) | text2image, image2image, inpainting | diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 3a5699394e..3f0f87b926 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -181,6 +181,7 @@ else: "AutoencoderOobleck", "AutoencoderTiny", "AutoModel", + "BriaTransformer2DModel", "CacheMixin", "ChromaTransformer2DModel", "CogVideoXTransformer3DModel", @@ -397,6 +398,7 @@ else: "AuraFlowPipeline", "BlipDiffusionControlNetPipeline", "BlipDiffusionPipeline", + "BriaPipeline", "ChromaImg2ImgPipeline", "ChromaPipeline", "CLIPImageProjection", @@ -846,6 +848,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: AutoencoderOobleck, AutoencoderTiny, AutoModel, + BriaTransformer2DModel, CacheMixin, ChromaTransformer2DModel, CogVideoXTransformer3DModel, @@ -1032,6 +1035,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: AudioLDM2UNet2DConditionModel, AudioLDMPipeline, AuraFlowPipeline, + BriaPipeline, ChromaImg2ImgPipeline, ChromaPipeline, CLIPImageProjection, diff --git a/src/diffusers/hooks/_helpers.py b/src/diffusers/hooks/_helpers.py index c36c0c31ea..b7a74be2e5 100644 --- a/src/diffusers/hooks/_helpers.py +++ b/src/diffusers/hooks/_helpers.py @@ -144,6 +144,7 @@ def _register_attention_processors_metadata(): def _register_transformer_blocks_metadata(): from ..models.attention import BasicTransformerBlock from ..models.transformers.cogvideox_transformer_3d import CogVideoXBlock + from ..models.transformers.transformer_bria import BriaTransformerBlock from ..models.transformers.transformer_cogview4 import CogView4TransformerBlock from ..models.transformers.transformer_flux import FluxSingleTransformerBlock, FluxTransformerBlock from ..models.transformers.transformer_hunyuan_video
import ( @@ -165,6 +166,13 @@ def _register_transformer_blocks_metadata(): return_encoder_hidden_states_index=None, ), ) + TransformerBlockRegistry.register( + model_class=BriaTransformerBlock, + metadata=TransformerBlockMetadata( + return_hidden_states_index=0, + return_encoder_hidden_states_index=None, + ), + ) # CogVideoX TransformerBlockRegistry.register( diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index 972233bd98..c432640362 100755 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -76,6 +76,7 @@ if is_torch_available(): _import_structure["transformers.t5_film_transformer"] = ["T5FilmDecoder"] _import_structure["transformers.transformer_2d"] = ["Transformer2DModel"] _import_structure["transformers.transformer_allegro"] = ["AllegroTransformer3DModel"] + _import_structure["transformers.transformer_bria"] = ["BriaTransformer2DModel"] _import_structure["transformers.transformer_chroma"] = ["ChromaTransformer2DModel"] _import_structure["transformers.transformer_cogview3plus"] = ["CogView3PlusTransformer2DModel"] _import_structure["transformers.transformer_cogview4"] = ["CogView4Transformer2DModel"] @@ -158,6 +159,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .transformers import ( AllegroTransformer3DModel, AuraFlowTransformer2DModel, + BriaTransformer2DModel, ChromaTransformer2DModel, CogVideoXTransformer3DModel, CogView3PlusTransformer2DModel, diff --git a/src/diffusers/models/transformers/__init__.py b/src/diffusers/models/transformers/__init__.py index 5550fed92d..b60f0636e6 100755 --- a/src/diffusers/models/transformers/__init__.py +++ b/src/diffusers/models/transformers/__init__.py @@ -17,6 +17,7 @@ if is_torch_available(): from .t5_film_transformer import T5FilmDecoder from .transformer_2d import Transformer2DModel from .transformer_allegro import AllegroTransformer3DModel + from .transformer_bria import BriaTransformer2DModel from .transformer_chroma import ChromaTransformer2DModel from .transformer_cogview3plus import CogView3PlusTransformer2DModel from .transformer_cogview4 import CogView4Transformer2DModel diff --git a/src/diffusers/models/transformers/transformer_bria.py b/src/diffusers/models/transformers/transformer_bria.py new file mode 100644 index 0000000000..27a9941501 --- /dev/null +++ b/src/diffusers/models/transformers/transformer_bria.py @@ -0,0 +1,719 @@ +import inspect +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import FromOriginalModelMixin, PeftAdapterMixin +from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import maybe_allow_in_graph +from ..attention import AttentionModuleMixin, FeedForward +from ..attention_dispatch import dispatch_attention_fn +from ..cache_utils import CacheMixin +from ..embeddings import TimestepEmbedding, apply_rotary_emb, get_timestep_embedding +from ..modeling_outputs import Transformer2DModelOutput +from ..modeling_utils import ModelMixin +from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def _get_projections(attn: "BriaAttention", hidden_states, encoder_hidden_states=None): + query = attn.to_q(hidden_states) + key = attn.to_k(hidden_states) + value = attn.to_v(hidden_states) + + 
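+    # Text-stream (encoder) projections below are only populated when `encoder_hidden_states` is passed
+    # and the attention module was built with `added_kv_proj_dim`; otherwise they remain `None`.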
encoder_query = encoder_key = encoder_value = None + if encoder_hidden_states is not None and attn.added_kv_proj_dim is not None: + encoder_query = attn.add_q_proj(encoder_hidden_states) + encoder_key = attn.add_k_proj(encoder_hidden_states) + encoder_value = attn.add_v_proj(encoder_hidden_states) + + return query, key, value, encoder_query, encoder_key, encoder_value + + +def _get_fused_projections(attn: "BriaAttention", hidden_states, encoder_hidden_states=None): + query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1) + + encoder_query = encoder_key = encoder_value = (None,) + if encoder_hidden_states is not None and hasattr(attn, "to_added_qkv"): + encoder_query, encoder_key, encoder_value = attn.to_added_qkv(encoder_hidden_states).chunk(3, dim=-1) + + return query, key, value, encoder_query, encoder_key, encoder_value + + +def _get_qkv_projections(attn: "BriaAttention", hidden_states, encoder_hidden_states=None): + if attn.fused_projections: + return _get_fused_projections(attn, hidden_states, encoder_hidden_states) + return _get_projections(attn, hidden_states, encoder_hidden_states) + + +def get_1d_rotary_pos_embed( + dim: int, + pos: Union[np.ndarray, int], + theta: float = 10000.0, + use_real=False, + linear_factor=1.0, + ntk_factor=1.0, + repeat_interleave_real=True, + freqs_dtype=torch.float32, # torch.float32, torch.float64 (flux) +): + """ + Precompute the frequency tensor for complex exponentials (cis) with given dimensions. + + This function calculates a frequency tensor with complex exponentials using the given dimension 'dim' and the end + index 'end'. The 'theta' parameter scales the frequencies. The returned tensor contains complex values in complex64 + data type. + + Args: + dim (`int`): Dimension of the frequency tensor. + pos (`np.ndarray` or `int`): Position indices for the frequency tensor. [S] or scalar + theta (`float`, *optional*, defaults to 10000.0): + Scaling factor for frequency computation. Defaults to 10000.0. + use_real (`bool`, *optional*): + If True, return real part and imaginary part separately. Otherwise, return complex numbers. + linear_factor (`float`, *optional*, defaults to 1.0): + Scaling factor for the context extrapolation. Defaults to 1.0. + ntk_factor (`float`, *optional*, defaults to 1.0): + Scaling factor for the NTK-Aware RoPE. Defaults to 1.0. + repeat_interleave_real (`bool`, *optional*, defaults to `True`): + If `True` and `use_real`, real part and imaginary part are each interleaved with themselves to reach `dim`. + Otherwise, they are concateanted with themselves. + freqs_dtype (`torch.float32` or `torch.float64`, *optional*, defaults to `torch.float32`): + the dtype of the frequency tensor. + Returns: + `torch.Tensor`: Precomputed frequency tensor with complex exponentials. 
[S, D/2] + """ + assert dim % 2 == 0 + + if isinstance(pos, int): + pos = torch.arange(pos) + if isinstance(pos, np.ndarray): + pos = torch.from_numpy(pos) # type: ignore # [S] + + theta = theta * ntk_factor + freqs = ( + 1.0 + / (theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype, device=pos.device)[: (dim // 2)] / dim)) + / linear_factor + ) # [D/2] + freqs = torch.outer(pos, freqs) # type: ignore # [S, D/2] + if use_real and repeat_interleave_real: + # bria + freqs_cos = freqs.cos().repeat_interleave(2, dim=1).float() # [S, D] + freqs_sin = freqs.sin().repeat_interleave(2, dim=1).float() # [S, D] + return freqs_cos, freqs_sin + elif use_real: + # stable audio, allegro + freqs_cos = torch.cat([freqs.cos(), freqs.cos()], dim=-1).float() # [S, D] + freqs_sin = torch.cat([freqs.sin(), freqs.sin()], dim=-1).float() # [S, D] + return freqs_cos, freqs_sin + else: + # lumina + freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64 # [S, D/2] + return freqs_cis + + +class BriaAttnProcessor: + _attention_backend = None + + def __init__(self): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError(f"{self.__class__.__name__} requires PyTorch 2.0. Please upgrade your pytorch version.") + + def __call__( + self, + attn: "BriaAttention", + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor = None, + attention_mask: Optional[torch.Tensor] = None, + image_rotary_emb: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections( + attn, hidden_states, encoder_hidden_states + ) + + query = query.unflatten(-1, (attn.heads, -1)) + key = key.unflatten(-1, (attn.heads, -1)) + value = value.unflatten(-1, (attn.heads, -1)) + + query = attn.norm_q(query) + key = attn.norm_k(key) + + if attn.added_kv_proj_dim is not None: + encoder_query = encoder_query.unflatten(-1, (attn.heads, -1)) + encoder_key = encoder_key.unflatten(-1, (attn.heads, -1)) + encoder_value = encoder_value.unflatten(-1, (attn.heads, -1)) + + encoder_query = attn.norm_added_q(encoder_query) + encoder_key = attn.norm_added_k(encoder_key) + + query = torch.cat([encoder_query, query], dim=1) + key = torch.cat([encoder_key, key], dim=1) + value = torch.cat([encoder_value, value], dim=1) + + if image_rotary_emb is not None: + query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1) + key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1) + + hidden_states = dispatch_attention_fn( + query, key, value, attn_mask=attention_mask, backend=self._attention_backend + ) + hidden_states = hidden_states.flatten(2, 3) + hidden_states = hidden_states.to(query.dtype) + + if encoder_hidden_states is not None: + encoder_hidden_states, hidden_states = hidden_states.split_with_sizes( + [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1 + ) + hidden_states = attn.to_out[0](hidden_states) + hidden_states = attn.to_out[1](hidden_states) + encoder_hidden_states = attn.to_add_out(encoder_hidden_states) + + return hidden_states, encoder_hidden_states + else: + return hidden_states + + +class BriaAttention(torch.nn.Module, AttentionModuleMixin): + _default_processor_cls = BriaAttnProcessor + _available_processors = [ + BriaAttnProcessor, + ] + + def __init__( + self, + query_dim: int, + heads: int = 8, + dim_head: int = 64, + dropout: float = 0.0, + bias: bool = False, + added_kv_proj_dim: Optional[int] = None, + added_proj_bias: Optional[bool] = True, + out_bias: bool = True, + eps: float = 
1e-5, + out_dim: int = None, + context_pre_only: Optional[bool] = None, + pre_only: bool = False, + elementwise_affine: bool = True, + processor=None, + ): + super().__init__() + + self.head_dim = dim_head + self.inner_dim = out_dim if out_dim is not None else dim_head * heads + self.query_dim = query_dim + self.use_bias = bias + self.dropout = dropout + self.out_dim = out_dim if out_dim is not None else query_dim + self.context_pre_only = context_pre_only + self.pre_only = pre_only + self.heads = out_dim // dim_head if out_dim is not None else heads + self.added_kv_proj_dim = added_kv_proj_dim + self.added_proj_bias = added_proj_bias + + self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) + self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) + self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) + self.to_k = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) + self.to_v = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) + + if not self.pre_only: + self.to_out = torch.nn.ModuleList([]) + self.to_out.append(torch.nn.Linear(self.inner_dim, self.out_dim, bias=out_bias)) + self.to_out.append(torch.nn.Dropout(dropout)) + + if added_kv_proj_dim is not None: + self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps) + self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps) + self.add_q_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) + self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) + self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) + self.to_add_out = torch.nn.Linear(self.inner_dim, query_dim, bias=out_bias) + + if processor is None: + processor = self._default_processor_cls() + self.set_processor(processor) + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + image_rotary_emb: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) + quiet_attn_parameters = {"ip_adapter_masks", "ip_hidden_states"} + unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters] + if len(unused_kwargs) > 0: + logger.warning( + f"attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored." 
+ ) + kwargs = {k: w for k, w in kwargs.items() if k in attn_parameters} + return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs) + + +class BriaEmbedND(torch.nn.Module): + # modified from https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/modules/layers.py#L11 + def __init__(self, theta: int, axes_dim: List[int]): + super().__init__() + self.theta = theta + self.axes_dim = axes_dim + + def forward(self, ids: torch.Tensor) -> torch.Tensor: + n_axes = ids.shape[-1] + cos_out = [] + sin_out = [] + pos = ids.float() + is_mps = ids.device.type == "mps" + freqs_dtype = torch.float32 if is_mps else torch.float64 + for i in range(n_axes): + cos, sin = get_1d_rotary_pos_embed( + self.axes_dim[i], + pos[:, i], + theta=self.theta, + repeat_interleave_real=True, + use_real=True, + freqs_dtype=freqs_dtype, + ) + cos_out.append(cos) + sin_out.append(sin) + freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device) + freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device) + return freqs_cos, freqs_sin + + +class BriaTimesteps(nn.Module): + def __init__( + self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float, scale: int = 1, time_theta=10000 + ): + super().__init__() + self.num_channels = num_channels + self.flip_sin_to_cos = flip_sin_to_cos + self.downscale_freq_shift = downscale_freq_shift + self.scale = scale + self.time_theta = time_theta + + def forward(self, timesteps): + t_emb = get_timestep_embedding( + timesteps, + self.num_channels, + flip_sin_to_cos=self.flip_sin_to_cos, + downscale_freq_shift=self.downscale_freq_shift, + scale=self.scale, + max_period=self.time_theta, + ) + return t_emb + + +class BriaTimestepProjEmbeddings(nn.Module): + def __init__(self, embedding_dim, time_theta): + super().__init__() + + self.time_proj = BriaTimesteps( + num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0, time_theta=time_theta + ) + self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) + + def forward(self, timestep, dtype): + timesteps_proj = self.time_proj(timestep) + timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=dtype)) # (N, D) + return timesteps_emb + + +class BriaPosEmbed(torch.nn.Module): + # modified from https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/modules/layers.py#L11 + def __init__(self, theta: int, axes_dim: List[int]): + super().__init__() + self.theta = theta + self.axes_dim = axes_dim + + def forward(self, ids: torch.Tensor) -> torch.Tensor: + n_axes = ids.shape[-1] + cos_out = [] + sin_out = [] + pos = ids.float() + is_mps = ids.device.type == "mps" + freqs_dtype = torch.float32 if is_mps else torch.float64 + for i in range(n_axes): + cos, sin = get_1d_rotary_pos_embed( + self.axes_dim[i], + pos[:, i], + theta=self.theta, + repeat_interleave_real=True, + use_real=True, + freqs_dtype=freqs_dtype, + ) + cos_out.append(cos) + sin_out.append(sin) + freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device) + freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device) + return freqs_cos, freqs_sin + + +@maybe_allow_in_graph +class BriaTransformerBlock(nn.Module): + def __init__( + self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6 + ): + super().__init__() + + self.norm1 = AdaLayerNormZero(dim) + self.norm1_context = AdaLayerNormZero(dim) + + self.attn = BriaAttention( + query_dim=dim, + added_kv_proj_dim=dim, + 
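+            # `added_kv_proj_dim` gives the text (encoder) tokens their own Q/K/V projections,
+            # so image and text streams attend jointly inside a single BriaAttention module.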
dim_head=attention_head_dim, + heads=num_attention_heads, + out_dim=dim, + context_pre_only=False, + bias=True, + processor=BriaAttnProcessor(), + eps=eps, + ) + + self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) + self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") + + self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) + self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor, + temb: torch.Tensor, + image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + attention_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) + + norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( + encoder_hidden_states, emb=temb + ) + attention_kwargs = attention_kwargs or {} + + # Attention. + attention_outputs = self.attn( + hidden_states=norm_hidden_states, + encoder_hidden_states=norm_encoder_hidden_states, + image_rotary_emb=image_rotary_emb, + **attention_kwargs, + ) + + if len(attention_outputs) == 2: + attn_output, context_attn_output = attention_outputs + elif len(attention_outputs) == 3: + attn_output, context_attn_output, ip_attn_output = attention_outputs + + # Process attention outputs for the `hidden_states`. + attn_output = gate_msa.unsqueeze(1) * attn_output + hidden_states = hidden_states + attn_output + + norm_hidden_states = self.norm2(hidden_states) + norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] + + ff_output = self.ff(norm_hidden_states) + ff_output = gate_mlp.unsqueeze(1) * ff_output + + hidden_states = hidden_states + ff_output + if len(attention_outputs) == 3: + hidden_states = hidden_states + ip_attn_output + + # Process attention outputs for the `encoder_hidden_states`. 
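+        # The context (text) stream mirrors the image stream above: a gated attention residual,
+        # followed by an AdaLN-modulated feed-forward update.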
+ context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output + encoder_hidden_states = encoder_hidden_states + context_attn_output + + norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) + norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] + + context_ff_output = self.ff_context(norm_encoder_hidden_states) + encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output + if encoder_hidden_states.dtype == torch.float16: + encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) + + return encoder_hidden_states, hidden_states + + +@maybe_allow_in_graph +class BriaSingleTransformerBlock(nn.Module): + def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0): + super().__init__() + self.mlp_hidden_dim = int(dim * mlp_ratio) + + self.norm = AdaLayerNormZeroSingle(dim) + self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim) + self.act_mlp = nn.GELU(approximate="tanh") + self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) + + processor = BriaAttnProcessor() + + self.attn = BriaAttention( + query_dim=dim, + dim_head=attention_head_dim, + heads=num_attention_heads, + out_dim=dim, + bias=True, + processor=processor, + eps=1e-6, + pre_only=True, + ) + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor, + temb: torch.Tensor, + image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + attention_kwargs: Optional[Dict[str, Any]] = None, + ) -> torch.Tensor: + text_seq_len = encoder_hidden_states.shape[1] + hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) + + residual = hidden_states + norm_hidden_states, gate = self.norm(hidden_states, emb=temb) + mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) + attention_kwargs = attention_kwargs or {} + attn_output = self.attn( + hidden_states=norm_hidden_states, + image_rotary_emb=image_rotary_emb, + **attention_kwargs, + ) + + hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) + gate = gate.unsqueeze(1) + hidden_states = gate * self.proj_out(hidden_states) + hidden_states = residual + hidden_states + if hidden_states.dtype == torch.float16: + hidden_states = hidden_states.clip(-65504, 65504) + + encoder_hidden_states, hidden_states = hidden_states[:, :text_seq_len], hidden_states[:, text_seq_len:] + return encoder_hidden_states, hidden_states + + +class BriaTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin): + """ + The Transformer model introduced in Flux. Based on FluxPipeline with several changes: + - no pooled embeddings + - We use zero padding for prompts + - No guidance embedding since this is not a distilled version + Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ + + Parameters: + patch_size (`int`): Patch size to turn the input data into small patches. + in_channels (`int`, *optional*, defaults to 16): The number of channels in the input. + num_layers (`int`, *optional*, defaults to 18): The number of layers of MMDiT blocks to use. + num_single_layers (`int`, *optional*, defaults to 18): The number of layers of single DiT blocks to use. + attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head. + num_attention_heads (`int`, *optional*, defaults to 18): The number of heads to use for multi-head attention. 
+ joint_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. + pooled_projection_dim (`int`): Number of dimensions to use when projecting the `pooled_projections`. + guidance_embeds (`bool`, defaults to False): Whether to use guidance embeddings. + """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + patch_size: int = 1, + in_channels: int = 64, + num_layers: int = 19, + num_single_layers: int = 38, + attention_head_dim: int = 128, + num_attention_heads: int = 24, + joint_attention_dim: int = 4096, + pooled_projection_dim: int = None, + guidance_embeds: bool = False, + axes_dims_rope: List[int] = [16, 56, 56], + rope_theta=10000, + time_theta=10000, + ): + super().__init__() + self.out_channels = in_channels + self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim + + self.pos_embed = BriaEmbedND(theta=rope_theta, axes_dim=axes_dims_rope) + + self.time_embed = BriaTimestepProjEmbeddings(embedding_dim=self.inner_dim, time_theta=time_theta) + if guidance_embeds: + self.guidance_embed = BriaTimestepProjEmbeddings(embedding_dim=self.inner_dim) + + self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.inner_dim) + self.x_embedder = torch.nn.Linear(self.config.in_channels, self.inner_dim) + + self.transformer_blocks = nn.ModuleList( + [ + BriaTransformerBlock( + dim=self.inner_dim, + num_attention_heads=self.config.num_attention_heads, + attention_head_dim=self.config.attention_head_dim, + ) + for i in range(self.config.num_layers) + ] + ) + + self.single_transformer_blocks = nn.ModuleList( + [ + BriaSingleTransformerBlock( + dim=self.inner_dim, + num_attention_heads=self.config.num_attention_heads, + attention_head_dim=self.config.attention_head_dim, + ) + for i in range(self.config.num_single_layers) + ] + ) + + self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6) + self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor = None, + pooled_projections: torch.Tensor = None, + timestep: torch.LongTensor = None, + img_ids: torch.Tensor = None, + txt_ids: torch.Tensor = None, + guidance: torch.Tensor = None, + attention_kwargs: Optional[Dict[str, Any]] = None, + return_dict: bool = True, + controlnet_block_samples=None, + controlnet_single_block_samples=None, + ) -> Union[torch.FloatTensor, Transformer2DModelOutput]: + """ + The [`BriaTransformer2DModel`] forward method. + + Args: + hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): + Input `hidden_states`. + encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`): + Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. + pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected + from the embeddings of input conditions. + timestep ( `torch.LongTensor`): + Used to indicate denoising step. + block_controlnet_hidden_states: (`list` of `torch.Tensor`): + A list of tensors that if specified are added to the residuals of transformer blocks. 
+ attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain + tuple. + + Returns: + If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a + `tuple` where the first element is the sample tensor. + """ + if attention_kwargs is not None: + attention_kwargs = attention_kwargs.copy() + lora_scale = attention_kwargs.pop("scale", 1.0) + else: + lora_scale = 1.0 + + if USE_PEFT_BACKEND: + # weight the lora layers by setting `lora_scale` for each PEFT layer + scale_lora_layers(self, lora_scale) + else: + if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None: + logger.warning( + "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective." + ) + hidden_states = self.x_embedder(hidden_states) + + timestep = timestep.to(hidden_states.dtype) + if guidance is not None: + guidance = guidance.to(hidden_states.dtype) + else: + guidance = None + + temb = self.time_embed(timestep, dtype=hidden_states.dtype) + + if guidance: + temb += self.guidance_embed(guidance, dtype=hidden_states.dtype) + + encoder_hidden_states = self.context_embedder(encoder_hidden_states) + + if len(txt_ids.shape) == 3: + txt_ids = txt_ids[0] + + if len(img_ids.shape) == 3: + img_ids = img_ids[0] + + ids = torch.cat((txt_ids, img_ids), dim=0) + image_rotary_emb = self.pos_embed(ids) + + for index_block, block in enumerate(self.transformer_blocks): + if torch.is_grad_enabled() and self.gradient_checkpointing: + encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( + block, + hidden_states, + encoder_hidden_states, + temb, + image_rotary_emb, + attention_kwargs, + ) + + else: + encoder_hidden_states, hidden_states = block( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + temb=temb, + image_rotary_emb=image_rotary_emb, + ) + + # controlnet residual + if controlnet_block_samples is not None: + interval_control = len(self.transformer_blocks) / len(controlnet_block_samples) + interval_control = int(np.ceil(interval_control)) + hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control] + + for index_block, block in enumerate(self.single_transformer_blocks): + if torch.is_grad_enabled() and self.gradient_checkpointing: + encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( + block, + hidden_states, + encoder_hidden_states, + temb, + image_rotary_emb, + attention_kwargs, + ) + + else: + encoder_hidden_states, hidden_states = block( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + temb=temb, + image_rotary_emb=image_rotary_emb, + ) + + # controlnet residual + if controlnet_single_block_samples is not None: + interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples) + interval_control = int(np.ceil(interval_control)) + hidden_states[:, encoder_hidden_states.shape[1] :, ...] = ( + hidden_states[:, encoder_hidden_states.shape[1] :, ...] 
+ + controlnet_single_block_samples[index_block // interval_control] + ) + + hidden_states = self.norm_out(hidden_states, temb) + output = self.proj_out(hidden_states) + + if USE_PEFT_BACKEND: + # remove `lora_scale` from each PEFT layer + unscale_lora_layers(self, lora_scale) + + if not return_dict: + return (output,) + + return Transformer2DModelOutput(sample=output) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 6b0394b486..de8eefd5ff 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -127,6 +127,7 @@ else: "AnimateDiffVideoToVideoPipeline", "AnimateDiffVideoToVideoControlNetPipeline", ] + _import_structure["bria"] = ["BriaPipeline"] _import_structure["flux"] = [ "FluxControlPipeline", "FluxControlInpaintPipeline", @@ -552,6 +553,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: ) from .aura_flow import AuraFlowPipeline from .blip_diffusion import BlipDiffusionPipeline + from .bria import BriaPipeline from .chroma import ChromaImg2ImgPipeline, ChromaPipeline from .cogvideo import ( CogVideoXFunControlPipeline, diff --git a/src/diffusers/pipelines/bria/__init__.py b/src/diffusers/pipelines/bria/__init__.py new file mode 100644 index 0000000000..60e319ac79 --- /dev/null +++ b/src/diffusers/pipelines/bria/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_bria"] = ["BriaPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_bria import BriaPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/src/diffusers/pipelines/bria/pipeline_bria.py b/src/diffusers/pipelines/bria/pipeline_bria.py new file mode 100644 index 0000000000..39ed484793 --- /dev/null +++ b/src/diffusers/pipelines/bria/pipeline_bria.py @@ -0,0 +1,729 @@ +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import ( + CLIPImageProcessor, + CLIPVisionModelWithProjection, + T5EncoderModel, + T5TokenizerFast, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import FluxLoraLoaderMixin +from ...models import AutoencoderKL +from ...models.transformers.transformer_bria import BriaTransformer2DModel +from ...pipelines import DiffusionPipeline +from ...pipelines.bria.pipeline_output import BriaPipelineOutput +from ...pipelines.flux.pipeline_flux import calculate_shift, retrieve_timesteps +from ...schedulers import ( + DDIMScheduler, + EulerAncestralDiscreteScheduler, + FlowMatchEulerDiscreteScheduler, + KarrasDiffusionSchedulers, +) 
+from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import BriaPipeline + + >>> pipe = BriaPipeline.from_pretrained("briaai/BRIA-3.2", torch_dtype=torch.bfloat16) + >>> pipe.to("cuda") + # BRIA's T5 text encoder is sensitive to precision. We need to cast it to bfloat16 and keep the final layer in float32. + + >>> pipe.text_encoder = pipe.text_encoder.to(dtype=torch.bfloat16) + >>> for block in pipe.text_encoder.encoder.block: + ... block.layer[-1].DenseReluDense.wo.to(dtype=torch.float32) + # BRIA's VAE is not supported in mixed precision, so we use float32. + + >>> if pipe.vae.config.shift_factor == 0: + ... pipe.vae.to(dtype=torch.float32) + + >>> prompt = "Photorealistic food photography of a stack of fluffy pancakes on a white plate, with maple syrup being poured over them. On top of the pancakes are the words 'BRIA 3.2' in bold, yellow, 3D letters. The background is dark and out of focus." + >>> image = pipe(prompt).images[0] + >>> image.save("bria.png") + ``` +""" + + +def is_ng_none(negative_prompt): + return ( + negative_prompt is None + or negative_prompt == "" + or (isinstance(negative_prompt, list) and negative_prompt[0] is None) + or (type(negative_prompt) == list and negative_prompt[0] == "") + ) + + +def get_original_sigmas(num_train_timesteps=1000, num_inference_steps=1000): + timesteps = np.linspace(1, num_train_timesteps, num_train_timesteps, dtype=np.float32)[::-1].copy() + sigmas = timesteps / num_train_timesteps + + inds = [int(ind) for ind in np.linspace(0, num_train_timesteps - 1, num_inference_steps)] + new_sigmas = sigmas[inds] + return new_sigmas + + +class BriaPipeline(DiffusionPipeline): + r""" + Based on FluxPipeline with several changes: + - no pooled embeddings + - We use zero padding for prompts + - No guidance embedding since this is not a distilled version + + Args: + transformer ([`BriaTransformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`T5EncoderModel`]): + Frozen text-encoder. Bria uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`T5TokenizerFast`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->transformer->vae" + _optional_components = ["image_encoder", "feature_extractor"] + _callback_tensor_inputs = ["latents", "prompt_embeds"] + + def __init__( + self, + transformer: BriaTransformer2DModel, + scheduler: Union[FlowMatchEulerDiscreteScheduler, KarrasDiffusionSchedulers], + vae: AutoencoderKL, + text_encoder: T5EncoderModel, + tokenizer: T5TokenizerFast, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + ): + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + transformer=transformer, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.default_sample_size = 64 # due to patchify=> 128,128 => res of 1k,1k + + if self.vae.config.shift_factor is None: + self.vae.config.shift_factor = 0 + self.vae.to(dtype=torch.float32) + + def encode_prompt( + self, + prompt: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + max_sequence_length: int = 128, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_embeds = self._get_t5_prompt_embeds( + prompt=prompt, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + if not is_ng_none(negative_prompt): + negative_prompt = ( + batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + ) + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + + negative_prompt_embeds = self._get_t5_prompt_embeds( + prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + else: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + + if self.text_encoder is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + text_ids = torch.zeros(batch_size, prompt_embeds.shape[1], 3).to(device=device) + text_ids = text_ids.repeat(num_images_per_prompt, 1, 1) + + return prompt_embeds, negative_prompt_embeds, text_ids + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def attention_kwargs(self): + return self._attention_kwargs + + @attention_kwargs.setter + def attention_kwargs(self, value): + self._attention_kwargs = value + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: + logger.warning( + f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. 
Dimensions will be resized accordingly" + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 128, + device: Optional[torch.device] = None, + ): + tokenizer = self.tokenizer + text_encoder = self.text_encoder + device = device or text_encoder.device + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + prompt_embeds_list = [] + for p in prompt: + text_inputs = tokenizer( + p, + # padding="max_length", + max_length=max_sequence_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device))[0] + + # Concat zeros to max_sequence + b, seq_len, dim = prompt_embeds.shape + if seq_len < max_sequence_length: + padding = torch.zeros( + (b, max_sequence_length - seq_len, dim), dtype=prompt_embeds.dtype, device=prompt_embeds.device + ) + prompt_embeds = torch.concat([prompt_embeds, padding], dim=1) + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=0) + prompt_embeds = prompt_embeds.to(device=device) + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = 
prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, max_sequence_length, -1) + prompt_embeds = prompt_embeds.to(dtype=self.transformer.dtype) + return prompt_embeds + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. + height = 2 * (int(height) // self.vae_scale_factor) + width = 2 * (int(width) // self.vae_scale_factor) + + shape = (batch_size, num_channels_latents, height, width) + + if latents is not None: + latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) + return latents.to(device=device, dtype=dtype), latent_image_ids + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) + + latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) + + return latents, latent_image_ids + + @staticmethod + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + + return latents + + @staticmethod + def _unpack_latents(latents, height, width, vae_scale_factor): + batch_size, num_patches, channels = latents.shape + + height = height // vae_scale_factor + width = width // vae_scale_factor + + latents = latents.view(batch_size, height, width, channels // 4, 2, 2) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2) + + return latents + + @staticmethod + def _prepare_latent_image_ids(batch_size, height, width, device, dtype): + latent_image_ids = torch.zeros(height, width, 3) + latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None] + latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :] + + latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape + + latent_image_ids = latent_image_ids.repeat(batch_size, 1, 1, 1) + latent_image_ids = latent_image_ids.reshape( + batch_size, latent_image_id_height * latent_image_id_width, latent_image_id_channels + ) + + return latent_image_ids.to(device=device, dtype=dtype) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 30, + timesteps: List[int] = None, + guidance_scale: float = 5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: 
Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 128, + clip_value: Union[None, float] = None, + normalize: bool = False, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. 
Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.bria.BriaPipelineOutput`] instead of a plain tuple. + attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.bria.BriaPipelineOutput`] or `tuple`: [`~pipelines.bria.BriaPipelineOutput`] if `return_dict` + is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated + images. + """ + + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt=prompt, + height=height, + width=width, + prompt_embeds=prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self.attention_kwargs = attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + lora_scale = self.attention_kwargs.get("scale", None) if self.attention_kwargs is not None else None + + (prompt_embeds, negative_prompt_embeds, text_ids) = self.encode_prompt( + prompt=prompt, + negative_prompt=negative_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + + # 5. 
Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 # due to patch=2, we devide by 4 + latents, latent_image_ids = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + if ( + isinstance(self.scheduler, FlowMatchEulerDiscreteScheduler) + and self.scheduler.config["use_dynamic_shifting"] + ): + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) + image_seq_len = latents.shape[1] + + mu = calculate_shift( + image_seq_len, + self.scheduler.config.base_image_seq_len, + self.scheduler.config.max_image_seq_len, + self.scheduler.config.base_shift, + self.scheduler.config.max_shift, + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + sigmas, + mu=mu, + ) + else: + # 4. Prepare timesteps + # Sample from training sigmas + if isinstance(self.scheduler, DDIMScheduler) or isinstance( + self.scheduler, EulerAncestralDiscreteScheduler + ): + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, None, None + ) + else: + sigmas = get_original_sigmas( + num_train_timesteps=self.scheduler.config.num_train_timesteps, + num_inference_steps=num_inference_steps, + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas=sigmas + ) + + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + if len(latent_image_ids.shape) == 3: + latent_image_ids = latent_image_ids[0] + if len(text_ids.shape) == 3: + text_ids = text_ids[0] + + # 6. Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + if type(self.scheduler) != FlowMatchEulerDiscreteScheduler: + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latent_model_input.shape[0]) + + # This is predicts "v" from flow-matching or eps from diffusion + noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + attention_kwargs=self.attention_kwargs, + return_dict=False, + txt_ids=text_ids, + img_ids=latent_image_ids, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + cfg_noise_pred_text = noise_pred_text.std() + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if normalize: + noise_pred = noise_pred * (0.7 * (cfg_noise_pred_text / noise_pred.std())) + 0.3 * noise_pred + + if clip_value: + assert clip_value > 0 + noise_pred = noise_pred.clip(-clip_value, clip_value) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if output_type == "latent": + image = latents + + else: + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = (latents.to(dtype=torch.float32) / self.vae.config.scaling_factor) + self.vae.config.shift_factor + image = self.vae.decode(latents.to(dtype=self.vae.dtype), return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return BriaPipelineOutput(images=image) diff --git a/src/diffusers/pipelines/bria/pipeline_output.py b/src/diffusers/pipelines/bria/pipeline_output.py new file mode 100644 index 0000000000..54eed06233 --- /dev/null +++ b/src/diffusers/pipelines/bria/pipeline_output.py @@ -0,0 +1,21 @@ +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput + + +@dataclass +class BriaPipelineOutput(BaseOutput): + """ + Output class for Bria pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] diff --git a/src/diffusers/utils/dummy_pt_objects.py b/src/diffusers/utils/dummy_pt_objects.py index 08a816ce4b..20380a449f 100644 --- a/src/diffusers/utils/dummy_pt_objects.py +++ b/src/diffusers/utils/dummy_pt_objects.py @@ -528,6 +528,21 @@ class AutoModel(metaclass=DummyObject): requires_backends(cls, ["torch"]) +class BriaTransformer2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + class CacheMixin(metaclass=DummyObject): _backends = ["torch"] diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 181cbdbc66..1885dc03bb 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -362,6 +362,21 @@ class AuraFlowPipeline(metaclass=DummyObject): requires_backends(cls, ["torch", "transformers"]) +class BriaPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class ChromaImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] diff --git a/tests/models/transformers/test_models_transformer_bria.py b/tests/models/transformers/test_models_transformer_bria.py new file mode 100644 index 0000000000..8a8d0dcecf --- /dev/null +++ b/tests/models/transformers/test_models_transformer_bria.py @@ -0,0 +1,181 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
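
For reference, a minimal end-to-end usage sketch of the new `BriaPipeline` introduced by this patch (not part of the diff itself). It assumes access to the gated `briaai/BRIA-3.2` checkpoint used in the slow tests, a CUDA device, and the bfloat16 precision the commit notes recommend; the prompt strings are placeholders.

```python
import torch

from diffusers import BriaPipeline

# Load the gated briaai/BRIA-3.2 checkpoint in bfloat16, the precision the
# pipeline example was updated to use because the model is precision sensitive.
pipe = BriaPipeline.from_pretrained("briaai/BRIA-3.2", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Defaults mirror the __call__ signature above: 30 inference steps,
# guidance_scale=5.0, 1024x1024 output, max_sequence_length=128.
image = pipe(
    prompt="A portrait of an astronaut in a sunflower field",  # placeholder prompt
    negative_prompt="blurry, low quality",
    num_inference_steps=30,
    guidance_scale=5.0,
).images[0]
image.save("bria_example.png")
```
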
+ +import unittest + +import torch + +from diffusers import BriaTransformer2DModel +from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0 +from diffusers.models.embeddings import ImageProjection +from diffusers.utils.testing_utils import enable_full_determinism, torch_device + +from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin + + +enable_full_determinism() + + +def create_bria_ip_adapter_state_dict(model): + # "ip_adapter" (cross-attention weights) + ip_cross_attn_state_dict = {} + key_id = 0 + + for name in model.attn_processors.keys(): + if name.startswith("single_transformer_blocks"): + continue + + joint_attention_dim = model.config["joint_attention_dim"] + hidden_size = model.config["num_attention_heads"] * model.config["attention_head_dim"] + sd = FluxIPAdapterJointAttnProcessor2_0( + hidden_size=hidden_size, cross_attention_dim=joint_attention_dim, scale=1.0 + ).state_dict() + ip_cross_attn_state_dict.update( + { + f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], + f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], + f"{key_id}.to_k_ip.bias": sd["to_k_ip.0.bias"], + f"{key_id}.to_v_ip.bias": sd["to_v_ip.0.bias"], + } + ) + + key_id += 1 + + # "image_proj" (ImageProjection layer weights) + + image_projection = ImageProjection( + cross_attention_dim=model.config["joint_attention_dim"], + image_embed_dim=model.config["pooled_projection_dim"], + num_image_text_embeds=4, + ) + + ip_image_projection_state_dict = {} + sd = image_projection.state_dict() + ip_image_projection_state_dict.update( + { + "proj.weight": sd["image_embeds.weight"], + "proj.bias": sd["image_embeds.bias"], + "norm.weight": sd["norm.weight"], + "norm.bias": sd["norm.bias"], + } + ) + + del sd + ip_state_dict = {} + ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) + return ip_state_dict + + +class BriaTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = BriaTransformer2DModel + main_input_name = "hidden_states" + # We override the items here because the transformer under consideration is small. 
+ model_split_percents = [0.8, 0.7, 0.7] + + # Skip setting testing with default: AttnProcessor + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_latent_channels = 4 + num_image_channels = 3 + height = width = 4 + sequence_length = 48 + embedding_dim = 32 + + hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + text_ids = torch.randn((sequence_length, num_image_channels)).to(torch_device) + image_ids = torch.randn((height * width, num_image_channels)).to(torch_device) + timestep = torch.tensor([1.0]).to(torch_device).expand(batch_size) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "img_ids": image_ids, + "txt_ids": text_ids, + "timestep": timestep, + } + + @property + def input_shape(self): + return (16, 4) + + @property + def output_shape(self): + return (16, 4) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": 1, + "in_channels": 4, + "num_layers": 1, + "num_single_layers": 1, + "attention_head_dim": 8, + "num_attention_heads": 2, + "joint_attention_dim": 32, + "pooled_projection_dim": None, + "axes_dims_rope": [0, 4, 4], + } + + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_deprecated_inputs_img_txt_ids_3d(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output_1 = model(**inputs_dict).to_tuple()[0] + + # update inputs_dict with txt_ids and img_ids as 3d tensors (deprecated) + text_ids_3d = inputs_dict["txt_ids"].unsqueeze(0) + image_ids_3d = inputs_dict["img_ids"].unsqueeze(0) + + assert text_ids_3d.ndim == 3, "text_ids_3d should be a 3d tensor" + assert image_ids_3d.ndim == 3, "img_ids_3d should be a 3d tensor" + + inputs_dict["txt_ids"] = text_ids_3d + inputs_dict["img_ids"] = image_ids_3d + + with torch.no_grad(): + output_2 = model(**inputs_dict).to_tuple()[0] + + self.assertEqual(output_1.shape, output_2.shape) + self.assertTrue( + torch.allclose(output_1, output_2, atol=1e-5), + msg="output with deprecated inputs (img_ids and txt_ids as 3d torch tensors) are not equal as them as 2d inputs", + ) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"BriaTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class BriaTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = BriaTransformer2DModel + + def prepare_init_args_and_inputs_for_common(self): + return BriaTransformerTests().prepare_init_args_and_inputs_for_common() + + +class BriaTransformerLoRAHotSwapTests(LoraHotSwappingForModelTesterMixin, unittest.TestCase): + model_class = BriaTransformer2DModel + + def prepare_init_args_and_inputs_for_common(self): + return BriaTransformerTests().prepare_init_args_and_inputs_for_common() diff --git a/tests/pipelines/bria/__init__.py b/tests/pipelines/bria/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/pipelines/bria/test_pipeline_bria.py b/tests/pipelines/bria/test_pipeline_bria.py new file mode 100644 index 0000000000..e6dec4ddc0 --- /dev/null +++ b/tests/pipelines/bria/test_pipeline_bria.py @@ -0,0 +1,318 @@ +# Copyright 2024 Bria AI and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import tempfile +import unittest + +import numpy as np +import torch +from huggingface_hub import hf_hub_download +from transformers import T5EncoderModel, T5TokenizerFast + +from diffusers import ( + AutoencoderKL, + BriaTransformer2DModel, + FlowMatchEulerDiscreteScheduler, +) +from diffusers.pipelines.bria import BriaPipeline +from diffusers.utils.testing_utils import ( + enable_full_determinism, + numpy_cosine_similarity_distance, + require_accelerator, + require_torch_gpu, + slow, + torch_device, +) + +# from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist +from tests.pipelines.test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class BriaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = BriaPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds"]) + batch_params = frozenset(["prompt"]) + test_xformers_attention = False + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = BriaTransformer2DModel( + patch_size=1, + in_channels=16, + num_layers=1, + num_single_layers=1, + attention_head_dim=8, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=None, + axes_dims_rope=[0, 4, 4], + ) + + torch.manual_seed(0) + vae = AutoencoderKL( + act_fn="silu", + block_out_channels=(32,), + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D"], + latent_channels=4, + sample_size=32, + shift_factor=0, + scaling_factor=0.13025, + use_post_quant_conv=True, + use_quant_conv=True, + force_upcast=False, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + torch.manual_seed(0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = T5TokenizerFast.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "negative_prompt": "bad, ugly", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 16, + "width": 16, + "max_sequence_length": 48, + "output_type": "np", + } + return inputs + + def test_encode_prompt_works_in_isolation(self): + pass + + def test_bria_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + 
output_same_prompt = pipe(**inputs).images[0] + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + assert max_diff > 1e-6 + + def test_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self, expected_max_diff=1e-2): + components = self.get_dummy_components() + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.to(torch_device).half() + + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for name, component in pipe_loaded.components.items(): + if name == "vae": + continue + if hasattr(component, "dtype"): + self.assertTrue( + component.dtype == torch.float16, + f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess( + max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." 
+ ) + + def test_bria_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(16, 16), (32, 32), (64, 64)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] + self.assertTrue([dtype == torch.float32 for dtype in model_dtypes] == [True, True, True]) + + def test_torch_dtype_dict(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + torch_dtype_dict = {"transformer": torch.bfloat16, "default": torch.float16} + loaded_pipe = self.pipeline_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype_dict) + + self.assertEqual(loaded_pipe.transformer.dtype, torch.bfloat16) + self.assertEqual(loaded_pipe.text_encoder.dtype, torch.float16) + self.assertEqual(loaded_pipe.vae.dtype, torch.float16) + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + torch_dtype_dict = {"default": torch.float16} + loaded_pipe = self.pipeline_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype_dict) + + self.assertEqual(loaded_pipe.transformer.dtype, torch.float16) + self.assertEqual(loaded_pipe.text_encoder.dtype, torch.float16) + self.assertEqual(loaded_pipe.vae.dtype, torch.float16) + + +@slow +@require_torch_gpu +class BriaPipelineSlowTests(unittest.TestCase): + pipeline_class = BriaPipeline + repo_id = "briaai/BRIA-3.2" + + def setUp(self): + super().setUp() + gc.collect() + torch.cuda.empty_cache() + + def tearDown(self): + super().tearDown() + gc.collect() + torch.cuda.empty_cache() + + def get_inputs(self, device, seed=0): + generator = torch.Generator(device="cpu").manual_seed(seed) + + prompt_embeds = torch.load( + hf_hub_download(repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/prompt_embeds.pt") + ).to(torch_device) + + return { + "prompt_embeds": prompt_embeds, + "num_inference_steps": 2, + "guidance_scale": 0.0, + "max_sequence_length": 256, + "output_type": "np", + "generator": generator, + } + + def test_bria_inference_bf16(self): + pipe = self.pipeline_class.from_pretrained( + self.repo_id, torch_dtype=torch.bfloat16, text_encoder=None, tokenizer=None + ) + pipe.to(torch_device) + + inputs = self.get_inputs(torch_device) + + image = pipe(**inputs).images[0] + image_slice = image[0, :10, :10].flatten() + + expected_slice = np.array( + [ + 0.59729785, + 0.6153719, + 0.595112, + 0.5884763, + 0.59366125, + 0.5795311, + 0.58325, + 0.58449626, + 0.57737637, + 0.58432233, + 0.5867875, + 0.57824117, + 0.5819089, + 0.5830988, + 0.57730293, + 0.57647324, + 0.5769151, + 0.57312685, + 0.57926565, + 0.5823928, + 0.57783926, + 0.57162863, + 0.575649, + 0.5745547, + 0.5740556, + 0.5799735, + 0.57799566, + 0.5715559, + 0.5771242, + 0.5773058, + ], + dtype=np.float32, + ) + max_diff = 
numpy_cosine_similarity_distance(expected_slice, image_slice) + self.assertLess(max_diff, 1e-4, f"Image slice is different from expected slice: {max_diff:.4f}") From 4fcd0bc7ebb934a1559d0b516f09534ba22c8a0d Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 20 Aug 2025 15:51:49 +0530 Subject: [PATCH 02/74] [chore] remove extra validation check in determine_device_map (#12176) remove extra validation check in determine_device_map --- src/diffusers/models/model_loading_utils.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/diffusers/models/model_loading_utils.py b/src/diffusers/models/model_loading_utils.py index 2e07f55e00..8b48ba6b48 100644 --- a/src/diffusers/models/model_loading_utils.py +++ b/src/diffusers/models/model_loading_utils.py @@ -112,9 +112,6 @@ def _determine_device_map( device_map_kwargs["max_memory"] = max_memory device_map = infer_auto_device_map(model, dtype=target_dtype, **device_map_kwargs) - if hf_quantizer is not None: - hf_quantizer.validate_environment(device_map=device_map) - return device_map From 91a151b5c698836ce3bac85fd9b5a2b10a726c99 Mon Sep 17 00:00:00 2001 From: Sam Yuan Date: Wed, 20 Aug 2025 23:49:19 +0800 Subject: [PATCH 03/74] continue translate document to zh (#12194) Signed-off-by: SamYuan1990 --- docs/source/zh/_toctree.yml | 56 ++ .../zh/hybrid_inference/api_reference.md | 9 + docs/source/zh/hybrid_inference/overview.md | 55 ++ docs/source/zh/hybrid_inference/vae_encode.md | 184 +++++ .../modular_diffusers/components_manager.md | 188 +++++ docs/source/zh/modular_diffusers/guiders.md | 173 +++++ docs/source/zh/optimization/cache.md | 67 ++ docs/source/zh/optimization/coreml.md | 163 +++++ docs/source/zh/optimization/deepcache.md | 59 ++ docs/source/zh/optimization/habana.md | 28 + docs/source/zh/optimization/memory.md | 581 ++++++++++++++++ docs/source/zh/optimization/mps.md | 82 +++ docs/source/zh/optimization/neuron.md | 59 ++ docs/source/zh/optimization/open_vino.md | 77 +++ docs/source/zh/optimization/para_attn.md | 497 ++++++++++++++ docs/source/zh/optimization/pruna.md | 184 +++++ .../zh/optimization/speed-memory-optims.md | 200 ++++++ docs/source/zh/optimization/tgate.md | 182 +++++ docs/source/zh/optimization/tome.md | 90 +++ docs/source/zh/optimization/xdit.md | 119 ++++ .../zh/training/distributed_inference.md | 239 +++++++ docs/source/zh/training/dreambooth.md | 643 ++++++++++++++++++ docs/source/zh/training/instructpix2pix.md | 255 +++++++ docs/source/zh/training/kandinsky.md | 328 +++++++++ docs/source/zh/training/wuerstchen.md | 191 ++++++ 25 files changed, 4709 insertions(+) create mode 100644 docs/source/zh/hybrid_inference/api_reference.md create mode 100644 docs/source/zh/hybrid_inference/overview.md create mode 100644 docs/source/zh/hybrid_inference/vae_encode.md create mode 100644 docs/source/zh/modular_diffusers/components_manager.md create mode 100644 docs/source/zh/modular_diffusers/guiders.md create mode 100644 docs/source/zh/optimization/cache.md create mode 100644 docs/source/zh/optimization/coreml.md create mode 100644 docs/source/zh/optimization/deepcache.md create mode 100644 docs/source/zh/optimization/habana.md create mode 100644 docs/source/zh/optimization/memory.md create mode 100644 docs/source/zh/optimization/mps.md create mode 100644 docs/source/zh/optimization/neuron.md create mode 100644 docs/source/zh/optimization/open_vino.md create mode 100644 docs/source/zh/optimization/para_attn.md create mode 100644 docs/source/zh/optimization/pruna.md create mode 100644 
docs/source/zh/optimization/speed-memory-optims.md create mode 100644 docs/source/zh/optimization/tgate.md create mode 100644 docs/source/zh/optimization/tome.md create mode 100644 docs/source/zh/optimization/xdit.md create mode 100644 docs/source/zh/training/distributed_inference.md create mode 100644 docs/source/zh/training/dreambooth.md create mode 100644 docs/source/zh/training/instructpix2pix.md create mode 100644 docs/source/zh/training/kandinsky.md create mode 100644 docs/source/zh/training/wuerstchen.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index 3daeaeaf79..337d010fc7 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -15,15 +15,49 @@ - local: using-diffusers/schedulers title: Load schedulers and models +- title: Inference + isExpanded: false + sections: + - local: training/distributed_inference + title: Distributed inference + - title: Inference optimization isExpanded: false sections: - local: optimization/fp16 title: Accelerate inference + - local: optimization/cache + title: Caching + - local: optimization/memory + title: Reduce memory usage + - local: optimization/speed-memory-optims + title: Compile and offloading quantized models - title: Community optimizations sections: + - local: optimization/pruna + title: Pruna - local: optimization/xformers title: xFormers + - local: optimization/tome + title: Token merging + - local: optimization/deepcache + title: DeepCache + - local: optimization/tgate + title: TGATE + - local: optimization/xdit + title: xDiT + - local: optimization/para_attn + title: ParaAttention + +- title: Hybrid Inference + isExpanded: false + sections: + - local: hybrid_inference/overview + title: Overview + - local: hybrid_inference/vae_encode + title: VAE Encode + - local: hybrid_inference/api_reference + title: API Reference - title: Modular Diffusers isExpanded: false @@ -44,6 +78,10 @@ title: AutoPipelineBlocks - local: modular_diffusers/modular_pipeline title: ModularPipeline + - local: modular_diffusers/components_manager + title: ComponentsManager + - local: modular_diffusers/guiders + title: Guiders - title: Training isExpanded: false @@ -56,12 +94,20 @@ sections: - local: training/text2image title: Text-to-image + - local: training/kandinsky + title: Kandinsky 2.2 + - local: training/wuerstchen + title: Wuerstchen - local: training/controlnet title: ControlNet + - local: training/instructpix2pix + title: InstructPix2Pix - title: Methods sections: - local: training/text_inversion title: Textual Inversion + - local: training/dreambooth + title: DreamBooth - local: training/lora title: LoRA @@ -70,6 +116,16 @@ sections: - local: optimization/onnx title: ONNX + - local: optimization/open_vino + title: OpenVINO + - local: optimization/coreml + title: Core ML + - local: optimization/mps + title: Metal Performance Shaders (MPS) + - local: optimization/habana + title: Intel Gaudi + - local: optimization/neuron + title: AWS Neuron - title: Specific pipeline examples isExpanded: false diff --git a/docs/source/zh/hybrid_inference/api_reference.md b/docs/source/zh/hybrid_inference/api_reference.md new file mode 100644 index 0000000000..74f6a35ec2 --- /dev/null +++ b/docs/source/zh/hybrid_inference/api_reference.md @@ -0,0 +1,9 @@ +# 混合推理 API 参考 + +## 远程解码 + +[[autodoc]] utils.remote_utils.remote_decode + +## 远程编码 + +[[autodoc]] utils.remote_utils.remote_encode \ No newline at end of file diff --git a/docs/source/zh/hybrid_inference/overview.md b/docs/source/zh/hybrid_inference/overview.md new file mode 
100644 index 0000000000..4d77d0abc2 --- /dev/null +++ b/docs/source/zh/hybrid_inference/overview.md @@ -0,0 +1,55 @@ + + +# 混合推理 + +**通过混合推理赋能本地 AI 构建者** + +> [!TIP] +> 混合推理是一项[实验性功能](https://huggingface.co/blog/remote_vae)。 +> 可以在此处提供反馈[此处](https://github.com/huggingface/diffusers/issues/new?template=remote-vae-pilot-feedback.yml)。 + +## 为什么使用混合推理? + +混合推理提供了一种快速简单的方式来卸载本地生成需求。 + +- 🚀 **降低要求:** 无需昂贵硬件即可访问强大模型。 +- 💎 **无妥协:** 在不牺牲性能的情况下实现最高质量。 +- 💰 **成本效益高:** 它是免费的!🤑 +- 🎯 **多样化用例:** 与 Diffusers � 和更广泛的社区完全兼容。 +- 🔧 **开发者友好:** 简单请求,快速响应。 + +--- + +## 可用模型 + +* **VAE 解码 🖼️:** 快速将潜在表示解码为高质量图像,不影响性能或工作流速度。 +* **VAE 编码 🔢:** 高效将图像编码为潜在表示,用于生成和训练。 +* **文本编码器 📃(即将推出):** 快速准确地计算提示的文本嵌入,确保流畅高质量的工作流。 + +--- + +## 集成 + +* **[SD.Next](https://github.com/vladmandic/sdnext):** 一体化 UI,直接支持混合推理。 +* **[ComfyUI-HFRemoteVae](https://github.com/kijai/ComfyUI-HFRemoteVae):** 用于混合推理的 ComfyUI 节点。 + +## 更新日志 + +- 2025 年 3 月 10 日:添加了 VAE 编码 +- 2025 年 3 月 2 日:初始发布,包含 VAE 解码 + +## 内容 + +文档分为三个部分: + +* **VAE 解码** 学习如何使用混合推理进行 VAE 解码的基础知识。 +* **VAE 编码** 学习如何使用混合推理进行 VAE 编码的基础知识。 +* **API 参考** 深入了解任务特定设置和参数。 \ No newline at end of file diff --git a/docs/source/zh/hybrid_inference/vae_encode.md b/docs/source/zh/hybrid_inference/vae_encode.md new file mode 100644 index 0000000000..30aee9a6bf --- /dev/null +++ b/docs/source/zh/hybrid_inference/vae_encode.md @@ -0,0 +1,184 @@ +# 入门:使用混合推理进行 VAE 编码 + +VAE 编码用于训练、图像到图像和图像到视频——将图像或视频转换为潜在表示。 + +## 内存 + +这些表格展示了在不同 GPU 上使用 SD v1 和 SD XL 进行 VAE 编码的 VRAM 需求。 + +对于这些 GPU 中的大多数,内存使用百分比决定了其他模型(文本编码器、UNet/Transformer)必须被卸载,或者必须使用分块编码,这会增加时间并影响质量。 + +
SD v1.5 + +| GPU | 分辨率 | 时间(秒) | 内存(%) | 分块时间(秒) | 分块内存(%) | +|:------------------------------|:-------------|-----------------:|-------------:|--------------------:|-------------------:| +| NVIDIA GeForce RTX 4090 | 512x512 | 0.015 | 3.51901 | 0.015 | 3.51901 | +| NVIDIA GeForce RTX 4090 | 256x256 | 0.004 | 1.3154 | 0.005 | 1.3154 | +| NVIDIA GeForce RTX 4090 | 2048x2048 | 0.402 | 47.1852 | 0.496 | 3.51901 | +| NVIDIA GeForce RTX 4090 | 1024x1024 | 0.078 | 12.2658 | 0.094 | 3.51901 | +| NVIDIA GeForce RTX 4080 SUPER | 512x512 | 0.023 | 5.30105 | 0.023 | 5.30105 | +| NVIDIA GeForce RTX 4080 SUPER | 256x256 | 0.006 | 1.98152 | 0.006 | 1.98152 | +| NVIDIA GeForce RTX 4080 SUPER | 2048x2048 | 0.574 | 71.08 | 0.656 | 5.30105 | +| NVIDIA GeForce RTX 4080 SUPER | 1024x1024 | 0.111 | 18.4772 | 0.14 | 5.30105 | +| NVIDIA GeForce RTX 3090 | 512x512 | 0.032 | 3.52782 | 0.032 | 3.52782 | +| NVIDIA GeForce RTX 3090 | 256x256 | 0.01 | 1.31869 | 0.009 | 1.31869 | +| NVIDIA GeForce RTX 3090 | 2048x2048 | 0.742 | 47.3033 | 0.954 | 3.52782 | +| NVIDIA GeForce RTX 3090 | 1024x1024 | 0.136 | 12.2965 | 0.207 | 3.52782 | +| NVIDIA GeForce RTX 3080 | 512x512 | 0.036 | 8.51761 | 0.036 | 8.51761 | +| NVIDIA GeForce RTX 3080 | 256x256 | 0.01 | 3.18387 | 0.01 | 3.18387 | +| NVIDIA GeForce RTX 3080 | 2048x2048 | 0.863 | 86.7424 | 1.191 | 8.51761 | +| NVIDIA GeForce RTX 3080 | 1024x1024 | 0.157 | 29.6888 | 0.227 | 8.51761 | +| NVIDIA GeForce RTX 3070 | 512x512 | 0.051 | 10.6941 | 0.051 | 10.6941 | +| NVIDIA GeForce RTX 3070 | 256x256 | 0.015 | +| 3.99743 | 0.015 | 3.99743 | +| NVIDIA GeForce RTX 3070 | 2048x2048 | 1.217 | 96.054 | 1.482 | 10.6941 | +| NVIDIA GeForce RTX 3070 | 1024x1024 | 0.223 | 37.2751 | 0.327 | 10.6941 | + +
+ +
SDXL + +| GPU | Resolution | Time (seconds) | Memory Consumed (%) | Tiled Time (seconds) | Tiled Memory (%) | +|:------------------------------|:-------------|-----------------:|----------------------:|-----------------------:|-------------------:| +| NVIDIA GeForce RTX 4090 | 512x512 | 0.029 | 4.95707 | 0.029 | 4.95707 | +| NVIDIA GeForce RTX 4090 | 256x256 | 0.007 | 2.29666 | 0.007 | 2.29666 | +| NVIDIA GeForce RTX 4090 | 2048x2048 | 0.873 | 66.3452 | 0.863 | 15.5649 | +| NVIDIA GeForce RTX 4090 | 1024x1024 | 0.142 | 15.5479 | 0.143 | 15.5479 | +| NVIDIA GeForce RTX 4080 SUPER | 512x512 | 0.044 | 7.46735 | 0.044 | 7.46735 | +| NVIDIA GeForce RTX 4080 SUPER | 256x256 | 0.01 | 3.4597 | 0.01 | 3.4597 | +| NVIDIA GeForce RTX 4080 SUPER | 2048x2048 | 1.317 | 87.1615 | 1.291 | 23.447 | +| NVIDIA GeForce RTX 4080 SUPER | 1024x1024 | 0.213 | 23.4215 | 0.214 | 23.4215 | +| NVIDIA GeForce RTX 3090 | 512x512 | 0.058 | 5.65638 | 0.058 | 5.65638 | +| NVIDIA GeForce RTX 3090 | 256x256 | 0.016 | 2.45081 | 0.016 | 2.45081 | +| NVIDIA GeForce RTX 3090 | 2048x2048 | 1.755 | 77.8239 | 1.614 | 18.4193 | +| NVIDIA GeForce RTX 3090 | 1024x1024 | 0.265 | 18.4023 | 0.265 | 18.4023 | +| NVIDIA GeForce RTX 3080 | 512x512 | 0.064 | 13.6568 | 0.064 | 13.6568 | +| NVIDIA GeForce RTX 3080 | 256x256 | 0.018 | 5.91728 | 0.018 | 5.91728 | +| NVIDIA GeForce RTX 3080 | 2048x2048 | 内存不足 (OOM) | 内存不足 (OOM) | 1.866 | 44.4717 | +| NVIDIA GeForce RTX 3080 | 1024x1024 | 0.302 | 44.4308 | 0.302 | 44.4308 | +| NVIDIA GeForce RTX 3070 | 512x512 | 0.093 | 17.1465 | 0.093 | 17.1465 | +| NVIDIA GeForce R +| NVIDIA GeForce RTX 3070 | 256x256 | 0.025 | 7.42931 | 0.026 | 7.42931 | +| NVIDIA GeForce RTX 3070 | 2048x2048 | OOM | OOM | 2.674 | 55.8355 | +| NVIDIA GeForce RTX 3070 | 1024x1024 | 0.443 | 55.7841 | 0.443 | 55.7841 | + +
+ +## 可用 VAE + +| | **端点** | **模型** | +|:-:|:-----------:|:--------:| +| **Stable Diffusion v1** | [https://qc6479g0aac6qwy9.us-east-1.aws.endpoints.huggingface.cloud](https://qc6479g0aac6qwy9.us-east-1.aws.endpoints.huggingface.cloud) | [`stabilityai/sd-vae-ft-mse`](https://hf.co/stabilityai/sd-vae-ft-mse) | +| **Stable Diffusion XL** | [https://xjqqhmyn62rog84g.us-east-1.aws.endpoints.huggingface.cloud](https://xjqqhmyn62rog84g.us-east-1.aws.endpoints.huggingface.cloud) | [`madebyollin/sdxl-vae-fp16-fix`](https://hf.co/madebyollin/sdxl-vae-fp16-fix) | +| **Flux** | [https://ptccx55jz97f9zgo.us-east-1.aws.endpoints.huggingface.cloud](https://ptccx55jz97f9zgo.us-east-1.aws.endpoints.huggingface.cloud) | [`black-forest-labs/FLUX.1-schnell`](https://hf.co/black-forest-labs/FLUX.1-schnell) | + + +> [!TIP] +> 模型支持可以在此处请求:[这里](https://github.com/huggingface/diffusers/issues/new?template=remote-vae-pilot-feedback.yml)。 + + +## 代码 + +> [!TIP] +> 从 `main` 安装 `diffusers` 以运行代码:`pip install git+https://github.com/huggingface/diffusers@main` + + +一个辅助方法简化了与混合推理的交互。 + +```python +from diffusers.utils.remote_utils import remote_encode +``` + +### 基本示例 + +让我们编码一张图像,然后解码以演示。 + +
+ +
+ +
代码 + +```python +from diffusers.utils import load_image +from diffusers.utils.remote_utils import remote_decode + +image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg?download=true") + +latent = remote_encode( + endpoint="https://ptccx55jz97f9zgo.us-east-1.aws.endpoints.huggingface.cloud/", + scaling_factor=0.3611, + shift_factor=0.1159, +) + +decoded = remote_decode( + endpoint="https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud/", + tensor=latent, + scaling_factor=0.3611, + shift_factor=0.1159, +) +``` + +
+ +
+ +
+ + +### 生成 + +现在让我们看一个生成示例,我们将编码图像,生成,然后远程解码! + +
代码 + +```python +import torch +from diffusers import StableDiffusionImg2ImgPip +from diffusers.utils import load_image +from diffusers.utils.remote_utils import remote_decode, remote_encode + +pipe = StableDiffusionImg2ImgPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + torch_dtype=torch.float16, + variant="fp16", + vae=None, +).to("cuda") + +init_image = load_image( + "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" +) +init_image = init_image.resize((768, 512)) + +init_latent = remote_encode( + endpoint="https://qc6479g0aac6qwy9.us-east-1.aws.endpoints.huggingface.cloud/", + image=init_image, + scaling_factor=0.18215, +) + +prompt = "A fantasy landscape, trending on artstation" +latent = pipe( + prompt=prompt, + image=init_latent, + strength=0.75, + output_type="latent", +).images + +image = remote_decode( + endpoint="https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud/", + tensor=latent, + scaling_factor=0.18215, +) +image.save("fantasy_landscape.jpg") +``` + +
+ +
+ +
+ +## 集成 + +* **[SD.Next](https://github.com/vladmandic/sdnext):** 具有直接支持混合推理功能的一体化用户界面。 +* **[ComfyUI-HFRemoteVae](https://github.com/kijai/ComfyUI-HFRemoteVae):** 用于混合推理的 ComfyUI 节点。 \ No newline at end of file diff --git a/docs/source/zh/modular_diffusers/components_manager.md b/docs/source/zh/modular_diffusers/components_manager.md new file mode 100644 index 0000000000..8b4425027f --- /dev/null +++ b/docs/source/zh/modular_diffusers/components_manager.md @@ -0,0 +1,188 @@ + + +# 组件管理器 + +[`ComponentsManager`] 是 Modular Diffusers 的模型注册和管理系统。它添加和跟踪模型,存储有用的元数据(模型大小、设备放置、适配器),防止重复模型实例,并支持卸载。 + +本指南将展示如何使用 [`ComponentsManager`] 来管理组件和设备内存。 + +## 添加组件 + +[`ComponentsManager`] 应与 [`ModularPipeline`] 一起创建,在 [`~ModularPipeline.from_pretrained`] 或 [`~ModularPipelineBlocks.init_pipeline`] 中。 + +> [!TIP] +> `collection` 参数是可选的,但可以更轻松地组织和管理组件。 + + + + +```py +from diffusers import ModularPipeline, ComponentsManager + +comp = ComponentsManager() +pipe = ModularPipeline.from_pretrained("YiYiXu/modular-demo-auto", components_manager=comp, collection="test1") +``` + + + + +```py +from diffusers import ComponentsManager +from diffusers.modular_pipelines import SequentialPipelineBlocks +from diffusers.modular_pipelines.stable_diffusion_xl import TEXT2IMAGE_BLOCKS + +t2i_blocks = SequentialPipelineBlocks.from_blocks_dict(TEXT2IMAGE_BLOCKS) + +modular_repo_id = "YiYiXu/modular-loader-t2i-0704" +components = ComponentsManager() +t2i_pipeline = t2i_blocks.init_pipeline(modular_repo_id, components_manager=components) +``` + + + + +组件仅在调用 [`~ModularPipeline.load_components`] 或 [`~ModularPipeline.load_default_components`] 时加载和注册。以下示例使用 [`~ModularPipeline.load_default_components`] 创建第二个管道,重用第一个管道的所有组件,并将其分配到不同的集合。 + +```py +pipe.load_default_components() +pipe2 = ModularPipeline.from_pretrained("YiYiXu/modular-demo-auto", components_manager=comp, collection="test2") +``` + +使用 [`~ModularPipeline.null_component_names`] 属性来识别需要加载的任何组件,使用 [`~ComponentsManager.get_components_by_names`] 检索它们,然后调用 [`~ModularPipeline.update_components`] 来添加缺失的组件。 + +```py +pipe2.null_component_names +['text_encoder', 'text_encoder_2', 'tokenizer', 'tokenizer_2', 'image_encoder', 'unet', 'vae', 'scheduler', 'controlnet'] + +comp_dict = comp.get_components_by_names(names=pipe2.null_component_names) +pipe2.update_components(**comp_dict) +``` + +要添加单个组件,请使用 [`~ComponentsManager.add`] 方法。这会使用唯一 id 注册一个组件。 + +```py +from diffusers import AutoModel + +text_encoder = AutoModel.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder") +component_id = comp.add("text_encoder", text_encoder) +comp +``` + +使用 [`~ComponentsManager.remove`] 通过其 id 移除一个组件。 + +```py +comp.remove("text_encoder_139917733042864") +``` + +## 检索组件 + +[`ComponentsManager`] 提供了几种方法来检索已注册的组件。 + +### get_one + +[`~ComponentsManager.get_one`] 方法返回单个组件,并支持对 `name` 参数进行模式匹配。如果多个组件匹配,[`~ComponentsManager.get_one`] 会返回错误。 + +| 模式 | 示例 | 描述 | +|-------------|----------------------------------|-------------------------------------------| +| exact | `comp.get_one(name="unet")` | 精确名称匹配 | +| wildcard | `comp.get_one(name="unet*")` | 名称以 "unet" 开头 | +| exclusion | `comp.get_one(name="!unet")` | 排除名为 "unet" 的组件 | +| or | `comp.get_one(name="unet|vae")` | 名称为 "unet" 或 "vae" | + +[`~ComponentsManager.get_one`] 还通过 `collection` 参数或 `load_id` 参数过滤组件。 + +```py +comp.get_one(name="unet", collection="sdxl") +``` + +### get_components_by_names + +[`~ComponentsManager.get_components_by_names`] 方法接受一个名称列表,并返回一个将名称映射到组件的字典。这在 [`ModularPipeline`] 
中特别有用,因为它们提供了所需组件名称的列表,并且返回的字典可以直接传递给 [`~ModularPipeline.update_components`]。 + +```py +component_dict = comp.get_components_by_names(names=["text_encoder", "unet", "vae"]) +{"text_encoder": component1, "unet": component2, "vae": component3} +``` + +## 重复检测 + +建议使用 [`ComponentSpec`] 加载模型组件,以分配具有唯一 id 的组件,该 id 编码了它们的加载参数。这允许 [`ComponentsManager`] 自动检测并防止重复的模型实例,即使不同的对象代表相同的底层检查点。 + +```py +from diffusers import ComponentSpec, ComponentsManager +from transformers import CLIPTextModel + +comp = ComponentsManager() + +# 为第一个文本编码器创建 ComponentSpec +spec = ComponentSpec(name="text_encoder", repo="stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder", type_hint=AutoModel) +# 为重复的文本编码器创建 ComponentSpec(它是相同的检查点,来自相同的仓库/子文件夹) +spec_duplicated = ComponentSpec(name="text_encoder_duplicated", repo="stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder", ty +pe_hint=CLIPTextModel) + +# 加载并添加两个组件 - 管理器会检测到它们是同一个模型 +comp.add("text_encoder", spec.load()) +comp.add("text_encoder_duplicated", spec_duplicated.load()) +``` + +这会返回一个警告,附带移除重复项的说明。 + +```py +ComponentsManager: adding component 'text_encoder_duplicated_139917580682672', but it has duplicate load_id 'stabilityai/stable-diffusion-xl-base-1.0|text_encoder|null|null' with existing components: text_encoder_139918506246832. To remove a duplicate, call `components_manager.remove('')`. +'text_encoder_duplicated_139917580682672' +``` + +您也可以不使用 [`ComponentSpec`] 添加组件,并且在大多数情况下,即使您以不同名称添加相同组件,重复检测仍然有效。 + +然而,当您将相同组件加载到不同对象时,[`ComponentManager`] 无法检测重复项。在这种情况下,您应该使用 [`ComponentSpec`] 加载模型。 + +```py +text_encoder_2 = AutoModel.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder") +comp.add("text_encoder", text_encoder_2) +'text_encoder_139917732983664' +``` + +## 集合 + +集合是为组件分配的标签,用于更好的组织和管理。使用 [`~ComponentsManager.add`] 中的 `collection` 参数将组件添加到集合中。 + +每个集合中只允许每个名称有一个组件。添加第二个同名组件会自动移除第一个组件。 + +```py +from diffusers import ComponentSpec, ComponentsManager + +comp = ComponentsManager() +# 为第一个 UNet 创建 ComponentSpec +spec = ComponentSpec(name="unet", repo="stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet", type_hint=AutoModel) +# 为另一个 UNet 创建 ComponentSpec +spec2 = ComponentSpec(name="unet", repo="RunDiffusion/Juggernaut-XL-v9", subfolder="unet", type_hint=AutoModel, variant="fp16") + +# 将两个 UNet 添加到同一个集合 - 第二个将替换第一个 +comp.add("unet", spec.load(), collection="sdxl") +comp.add("unet", spec2.load(), collection="sdxl") +``` + +这使得在基于节点的系统中工作变得方便,因为您可以: + +- 使用 `collection` 标签标记所有从一个节点加载的模型。 +- 当新检查点以相同名称加载时自动替换模型。 +- 当节点被移除时批量删除集合中的所有模型。 + +## 卸载 + +[`~ComponentsManager.enable_auto_cpu_offload`] 方法是一种全局卸载策略,适用于所有模型,无论哪个管道在使用它们。一旦启用,您无需担心设备放置,如果您添加或移除组件。 + +```py +comp.enable_auto_cpu_offload(device="cuda") +``` + +所有模型开始时都在 CPU 上,[`ComponentsManager`] 在需要它们之前将它们移动到适当的设备,并在 GPU 内存不足时将其他模型移回 CPU。 + +您可以设置自己的规则来决定哪些模型要卸载。 \ No newline at end of file diff --git a/docs/source/zh/modular_diffusers/guiders.md b/docs/source/zh/modular_diffusers/guiders.md new file mode 100644 index 0000000000..d0b5fb4312 --- /dev/null +++ b/docs/source/zh/modular_diffusers/guiders.md @@ -0,0 +1,173 @@ + + +# 引导器 + +[Classifier-free guidance](https://huggingface.co/papers/2207.12598) 引导模型生成更好地匹配提示,通常用于提高生成质量、控制和提示的遵循度。有不同类型的引导方法,在 Diffusers 中,它们被称为*引导器*。与块类似,可以轻松切换和使用不同的引导器以适应不同的用例,而无需重写管道。 + +本指南将向您展示如何切换引导器、调整引导器参数,以及将它们加载并共享到 Hub。 + +## 切换引导器 + +[`ClassifierFreeGuidance`] 是默认引导器,在使用 [`~ModularPipelineBlocks.init_pipeline`] 初始化管道时创建。它通过 `from_config` 创建,这意味着它不需要从模块化存储库加载规范。引导器不会列在 `modular_model_index.json` 中。 + +使用 
[`~ModularPipeline.get_component_spec`] 来检查引导器。 + +```py +t2i_pipeline.get_component_spec("guider") +ComponentSpec(name='guider', type_hint=, description=None, config=FrozenDict([('guidance_scale', 7.5), ('guidance_rescale', 0.0), ('use_original_formulation', False), ('start', 0.0), ('stop', 1.0), ('_use_default_values', ['start', 'guidance_rescale', 'stop', 'use_original_formulation'])]), repo=None, subfolder=None, variant=None, revision=None, default_creation_method='from_config') +``` + +通过将新引导器传递给 [`~ModularPipeline.update_components`] 来切换到不同的引导器。 + +> [!TIP] +> 更改引导器将返回文本,让您知道您正在更改引导器类型。 +> ```bash +> ModularPipeline.update_components: 添加具有新类型的引导器: PerturbedAttentionGuidance, 先前类型: ClassifierFreeGuidance +> ``` + +```py +from diffusers import LayerSkipConfig, PerturbedAttentionGuidance + +config = LayerSkipConfig(indices=[2, 9], fqn="mid_block.attentions.0.transformer_blocks", skip_attention=False, skip_attention_scores=True, skip_ff=False) +guider = PerturbedAttentionGuidance( + guidance_scale=5.0, perturbed_guidance_scale=2.5, perturbed_guidance_config=config +) +t2i_pipeline.update_components(guider=guider) +``` + +再次使用 [`~ModularPipeline.get_component_spec`] 来验证引导器类型是否不同。 + +```py +t2i_pipeline.get_component_spec("guider") +ComponentSpec(name='guider', type_hint=, description=None, config=FrozenDict([('guidance_scale', 5.0), ('perturbed_guidance_scale', 2.5), ('perturbed_guidance_start', 0.01), ('perturbed_guidance_stop', 0.2), ('perturbed_guidance_layers', None), ('perturbed_guidance_config', LayerSkipConfig(indices=[2, 9], fqn='mid_block.attentions.0.transformer_blocks', skip_attention=False, skip_attention_scores=True, skip_ff=False, dropout=1.0)), ('guidance_rescale', 0.0), ('use_original_formulation', False), ('start', 0.0), ('stop', 1.0), ('_use_default_values', ['perturbed_guidance_start', 'use_original_formulation', 'perturbed_guidance_layers', 'stop', 'start', 'guidance_rescale', 'perturbed_guidance_stop']), ('_class_name', 'PerturbedAttentionGuidance'), ('_diffusers_version', '0.35.0.dev0')]), repo=None, subfolder=None, variant=None, revision=None, default_creation_method='from_config') +``` + +## 加载自定义引导器 + +已经在 Hub 上保存并带有 `modular_model_index.json` 文件的引导器现在被视为 `from_pretrained` 组件,而不是 `from_config` 组件。 + +```json +{ + "guider": [ + null, + null, + { + "repo": "YiYiXu/modular-loader-t2i-guider", + "revision": null, + "subfolder": "pag_guider", + "type_hint": [ + "diffusers", + "PerturbedAttentionGuidance" + ], + "variant": null + } + ] +} +``` + +引导器只有在调用 [`~ModularPipeline.load_default_components`] 之后才会创建,基于 `modular_model_index.json` 中的加载规范。 + +```py +t2i_pipeline = t2i_blocks.init_pipeline("YiYiXu/modular-doc-guider") +# 在初始化时未创建 +assert t2i_pipeline.guider is None +t2i_pipeline.load_default_components() +# 加载为 PAG 引导器 +t2i_pipeline.guider +``` + +## 更改引导器参数 + +引导器参数可以通过 [`~ComponentSpec.create`] 方法或 [`~ModularPipeline.update_components`] 方法进行调整。下面的示例更改了 `guidance_scale` 值。 + + + + +```py +guider_spec = t2i_pipeline.get_component_spec("guider") +guider = guider_spec.create(guidance_scale=10) +t2i_pipeline.update_components(guider=guider) +``` + + + + +```py +guider_spec = t2i_pipeline.get_component_spec("guider") +guider_spec.config["guidance_scale"] = 10 +t2i_pipeline.update_components(guider=guider_spec) +``` + + + + +## 上传自定义引导器 + +在自定义引导器上调用 [`~utils.PushToHubMixin.push_to_hub`] 方法,将其分享到 Hub。 + +```py +guider.push_to_hub("YiYiXu/modular-loader-t2i-guider", subfolder="pag_guider") +``` + +要使此引导器可用于管道,可以修改 `modular_model_index.json` 文件或使用 
[`~ModularPipeline.update_components`] 方法。 + + + + +编辑 `modular_model_index.json` 文件,并添加引导器的加载规范,指向包含引导器配置的文件夹 +例如。 + +```json +{ + "guider": [ + "diffusers", + "PerturbedAttentionGuidance", + { + "repo": "YiYiXu/modular-loader-t2i-guider", + "revision": null, + "subfolder": "pag_guider", + "type_hint": [ + "diffusers", + "PerturbedAttentionGuidance" + ], + "variant": null + } + ], +``` + + + + +将 [`~ComponentSpec.default_creation_method`] 更改为 `from_pretrained` 并使用 [`~ModularPipeline.update_components`] 来更新引导器和组件规范以及管道配置。 + +> [!TIP] +> 更改创建方法将返回文本,告知您正在将创建类型更改为 `from_pretrained`。 +> ```bash +> ModularPipeline.update_components: 将引导器的 default_creation_method 从 from_config 更改为 from_pretrained。 +> ``` + +```py +guider_spec = t2i_pipeline.get_component_spec("guider") +guider_spec.default_creation_method="from_pretrained" +guider_spec.repo="YiYiXu/modular-loader-t2i-guider" +guider_spec.subfolder="pag_guider" +pag_guider = guider_spec.load() +t2i_pipeline.update_components(guider=pag_guider) +``` + +要使其成为管道的默认引导器,请调用 [`~utils.PushToHubMixin.push_to_hub`]。这是一个可选步骤,如果您仅在本地进行实验,则不需要。 + +```py +t2i_pipeline.push_to_hub("YiYiXu/modular-doc-guider") +``` + + + \ No newline at end of file diff --git a/docs/source/zh/optimization/cache.md b/docs/source/zh/optimization/cache.md new file mode 100644 index 0000000000..f7a94de4f1 --- /dev/null +++ b/docs/source/zh/optimization/cache.md @@ -0,0 +1,67 @@ + + +# 缓存 + +缓存通过存储和重用不同层的中间输出(如注意力层和前馈层)来加速推理,而不是在每个推理步骤执行整个计算。它显著提高了生成速度,但以更多内存为代价,并且不需要额外的训练。 + +本指南向您展示如何在 Diffusers 中使用支持的缓存方法。 + +## 金字塔注意力广播 + +[金字塔注意力广播 (PAB)](https://huggingface.co/papers/2408.12588) 基于这样一种观察:在生成过程的连续时间步之间,注意力输出差异不大。注意力差异在交叉注意力层中最小,并且通常在一个较长的时间步范围内被缓存。其次是时间注意力和空间注意力层。 + +> [!TIP] +> 并非所有视频模型都有三种类型的注意力(交叉、时间和空间)! + +PAB 可以与其他技术(如序列并行性和无分类器引导并行性(数据并行性))结合,实现近乎实时的视频生成。 + +设置并传递一个 [`PyramidAttentionBroadcastConfig`] 到管道的变换器以启用它。`spatial_attention_block_skip_range` 控制跳过空间注意力块中注意力计算的频率,`spatial_attention_timestep_skip_range` 是要跳过的时间步范围。注意选择一个合适的范围,因为较小的间隔可能导致推理速度变慢,而较大的间隔可能导致生成质量降低。 + +```python +import torch +from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig + +pipeline = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16) +pipeline.to("cuda") + +config = PyramidAttentionBroadcastConfig( + spatial_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(100, 800), + current_timestep_callback=lambda: pipe.current_timestep, +) +pipeline.transformer.enable_cache(config) +``` + +## FasterCache + +[FasterCache](https://huggingface.co/papers/2410.19355) 缓存并重用注意力特征,类似于 [PAB](#pyramid-attention-broadcast),因为每个连续时间步的输出差异很小。 + +此方法在使用无分类器引导进行采样时(在大多数基础模型中常见),也可能选择跳过无条件分支预测,并且 +如果连续时间步之间的预测潜在输出存在显著冗余,则从条件分支预测中估计它。 + +设置并将 [`FasterCacheConfig`] 传递给管道的 transformer 以启用它。 + +```python +import torch +from diffusers import CogVideoXPipeline, FasterCacheConfig + +pipe line= CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16) +pipeline.to("cuda") + +config = FasterCacheConfig( + spatial_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(-1, 681), + current_timestep_callback=lambda: pipe.current_timestep, + attention_weight_callback=lambda _: 0.3, + unconditional_batch_skip_range=5, + unconditional_batch_timestep_skip_range=(-1, 781), + tensor_format="BFCHW", +) +pipeline.transformer.enable_cache(config) +``` \ No newline at end of file diff --git a/docs/source/zh/optimization/coreml.md b/docs/source/zh/optimization/coreml.md new file mode 100644 index 0000000000..1d78866720 --- 
/dev/null +++ b/docs/source/zh/optimization/coreml.md @@ -0,0 +1,163 @@ + + +# 如何使用 Core ML 运行 Stable Diffusion + +[Core ML](https://developer.apple.com/documentation/coreml) 是 Apple 框架支持的模型格式和机器学习库。如果您有兴趣在 macOS 或 iOS/iPadOS 应用中运行 Stable Diffusion 模型,本指南将展示如何将现有的 PyTorch 检查点转换为 Core ML 格式,并使用 Python 或 Swift 进行推理。 + +Core ML 模型可以利用 Apple 设备中所有可用的计算引擎:CPU、GPU 和 Apple Neural Engine(或 ANE,一种在 Apple Silicon Mac 和现代 iPhone/iPad 中可用的张量优化加速器)。根据模型及其运行的设备,Core ML 还可以混合和匹配计算引擎,例如,模型的某些部分可能在 CPU 上运行,而其他部分在 GPU 上运行。 + + + +您还可以使用 PyTorch 内置的 `mps` 加速器在 Apple Silicon Mac 上运行 `diffusers` Python 代码库。这种方法在 [mps 指南](mps) 中有详细解释,但它与原生应用不兼容。 + + + +## Stable Diffusion Core ML 检查点 + +Stable Diffusion 权重(或检查点)以 PyTorch 格式存储,因此在使用它们之前,需要将它们转换为 Core ML 格式。 + +幸运的是,Apple 工程师基于 `diffusers` 开发了 [一个转换工具](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml),用于将 PyTorch 检查点转换为 Core ML。 + +但在转换模型之前,花点时间探索 Hugging Face Hub——很可能您感兴趣的模型已经以 Core ML 格式提供: + +- [Apple](https://huggingface.co/apple) 组织包括 Stable Diffusion 版本 1.4、1.5、2.0 基础和 2.1 基础 +- [coreml community](https://huggingface.co/coreml-community) 包括自定义微调模型 +- 使用此 [过滤器](https://huggingface.co/models?pipeline_tag=text-to-image&library=coreml&p=2&sort=likes) 返回所有可用的 Core ML 检查点 + +如果您找不到感兴趣的模型,我们建议您遵循 Apple 的 [Converting Models to Core ML](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml) 说明。 + +## 选择要使用的 Core ML 变体 + +Stable Diffusion 模型可以转换为不同的 Core ML 变体,用于不同目的: + +- 注意力类型 +使用了n个块。注意力操作用于“关注”图像表示中不同区域之间的关系,并理解图像和文本表示如何相关。注意力的计算和内存消耗很大,因此存在不同的实现方式,以适应不同设备的硬件特性。对于Core ML Stable Diffusion模型,有两种注意力变体: +* `split_einsum`([由Apple引入](https://machinelearning.apple.com/research/neural-engine-transformers))针对ANE设备进行了优化,这些设备在现代iPhone、iPad和M系列计算机中可用。 +* “原始”注意力(在`diffusers`中使用的基础实现)仅与CPU/GPU兼容,不与ANE兼容。在CPU + GPU上使用`original`注意力运行模型可能比ANE*更快*。请参阅[此性能基准](https://huggingface.co/blog/fast-mac-diffusers#performance-benchmarks)以及社区提供的[一些额外测量](https://github.com/huggingface/swift-coreml-diffusers/issues/31)以获取更多细节。 + +- 支持的推理框架。 +* `packages`适用于Python推理。这可用于在尝试将转换后的Core ML模型集成到原生应用程序之前进行测试,或者如果您想探索Core ML性能但不需要支持原生应用程序。例如,具有Web UI的应用程序完全可以使用Python Core ML后端。 +* `compiled`模型是Swift代码所必需的。Hub中的`compiled`模型将大型UNet模型权重分成多个文件,以兼容iOS和iPadOS设备。这对应于[`--chunk-unet`转换选项](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml)。如果您想支持原生应用程序,则需要选择`compiled`变体。 + +官方的Core ML Stable Diffusion[模型](https://huggingface.co/apple/coreml-stable-diffusion-v1-4/tree/main)包括这些变体,但社区的可能有所不同: + +``` +coreml-stable-diffusion-v1-4 +├── README.md +├── original +│ ├── compiled +│ └── packages +└── split_einsum + ├── compiled + └── packages +``` + +您可以下载并使用所需的变体,如下所示。 + +## Python中的Core ML推理 + +安装以下库以在Python中运行Core ML推理: + +```bash +pip install huggingface_hub +pip install git+https://github.com/apple/ml-stable-diffusion +``` + +### 下载模型检查点 + +要在Python中运行推理,请使用存储在`packages`文件夹中的版本之一,因为`compiled`版本仅与Swift兼容。您可以选择使用`original`或`split_einsum`注意力。 + +这是您如何从Hub下载`original`注意力变体到一个名为`models`的目录: + +```Python +from huggingface_hub import snapshot_download +from pathlib import Path + +repo_id = "apple/coreml-stable-diffusion-v1-4" +variant = "original/packages" + +mo +del_path = Path("./models") / (repo_id.split("/")[-1] + "_" + variant.replace("/", "_")) +snapshot_download(repo_id, allow_patterns=f"{variant}/*", local_dir=model_path, local_dir_use_symlinks=False) +print(f"Model downloaded at {model_path}") +``` + +### 推理[[python-inference]] + +下载模型快照后,您可以使用 Apple 的 Python 脚本来测试它。 + +```shell +python -m python_coreml_stable_diffusion.pipeline --prompt "a 
photo of an astronaut riding a horse on mars" -i ./models/coreml-stable-diffusion-v1-4_original_packages/original/packages -o --compute-unit CPU_AND_GPU --seed 93 +``` + +使用 `-i` 标志将下载的检查点路径传递给脚本。`--compute-unit` 表示您希望允许用于推理的硬件。它必须是以下选项之一:`ALL`、`CPU_AND_GPU`、`CPU_ONLY`、`CPU_AND_NE`。您也可以提供可选的输出路径和用于可重现性的种子。 + +推理脚本假设您使用的是 Stable Diffusion 模型的原始版本,`CompVis/stable-diffusion-v1-4`。如果您使用另一个模型,您*必须*在推理命令行中使用 `--model-version` 选项指定其 Hub ID。这适用于已支持的模型以及您自己训练或微调的自定义模型。 + +例如,如果您想使用 [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5): + +```shell +python -m python_coreml_stable_diffusion.pipeline --prompt "a photo of an astronaut riding a horse on mars" --compute-unit ALL -o output --seed 93 -i models/coreml-stable-diffusion-v1-5_original_packages --model-version stable-diffusion-v1-5/stable-diffusion-v1-5 +``` + +## Core ML 在 Swift 中的推理 + +在 Swift 中运行推理比在 Python 中稍快,因为模型已经以 `mlmodelc` 格式编译。这在应用启动时加载模型时很明显,但如果在之后运行多次生成,则不应明显。 + +### 下载 + +要在您的 Mac 上运行 Swift 推理,您需要一个 `compiled` 检查点版本。我们建议您使用类似于先前示例的 Python 代码在本地下载它们,但使用 `compiled` 变体之一: + +```Python +from huggingface_hub import snapshot_download +from pathlib import Path + +repo_id = "apple/coreml-stable-diffusion-v1-4" +variant = "original/compiled" + +model_path = Path("./models") / (repo_id.split("/")[-1] + "_" + variant.replace("/", "_")) +snapshot_download(repo_id, allow_patterns=f"{variant}/*", local_dir=model_path, local_dir_use_symlinks=False) +print(f"Model downloaded at {model_path}") +``` + +### 推理[[swift-inference]] + +要运行推理,请克隆 Apple 的仓库: + +```bash +git clone https://github.com/apple/ml-stable-diffusion +cd ml-stable-diffusion +``` + +然后使用 Apple 的命令行工具,[Swift Package Manager](https://www.swift.org/package-manager/#): + +```bash +swift run StableDiffusionSample --resource-path models/coreml-stable-diffusion-v1-4_original_compiled --compute-units all "a photo of an astronaut riding a horse on mars" +``` + +您必须在 `--resource-path` 中指定上一步下载的检查点之一,请确保它包含扩展名为 `.mlmodelc` 的已编译 Core ML 包。`--compute-units` 必须是以下值之一:`all`、`cpuOnly`、`cpuAndGPU`、`cpuAndNeuralEngine`。 + +有关更多详细信息,请参考 [Apple 仓库中的说明](https://github.com/apple/ml-stable-diffusion)。 + +## 支持的 Diffusers 功能 + +Core ML 模型和推理代码不支持 🧨 Diffusers 的许多功能、选项和灵活性。以下是一些需要注意的限制: + +- Core ML 模型仅适用于推理。它们不能用于训练或微调。 +- 只有两个调度器已移植到 Swift:Stable Diffusion 使用的默认调度器和我们从 `diffusers` 实现移植到 Swift 的 `DPMSolverMultistepScheduler`。我们推荐您使用 `DPMSolverMultistepScheduler`,因为它在约一半的步骤中产生相同的质量。 +- 负面提示、无分类器引导尺度和图像到图像任务在推理代码中可用。高级功能如深度引导、ControlNet 和潜在上采样器尚不可用。 + +Apple 的 [转换和推理仓库](https://github.com/apple/ml-stable-diffusion) 和我们自己的 [swift-coreml-diffusers](https://github.com/huggingface/swift-coreml-diffusers) 仓库旨在作为技术演示,以帮助其他开发者在此基础上构建。 + +如果您对任何缺失功能有强烈需求,请随时提交功能请求或更好的是,贡献一个 PR 🙂。 + +## 原生 Diffusers Swift 应用 + +一个简单的方法来在您自己的 Apple 硬件上运行 Stable Diffusion 是使用 [我们的开源 Swift 仓库](https://github.com/huggingface/swift-coreml-diffusers),它基于 `diffusers` 和 Apple 的转换和推理仓库。您可以研究代码,使用 [Xcode](https://developer.apple.com/xcode/) 编译它,并根据您的需求进行适配。为了方便,[App Store 中还有一个独立 Mac 应用](https://apps.apple.com/app/diffusers/id1666309574),因此您无需处理代码或 IDE 即可使用它。如果您是开发者,并已确定 Core ML 是构建您的 Stable Diffusion 应用的最佳解决方案,那么您可以使用本指南的其余部分来开始您的项目。我们迫不及待想看看您会构建什么 🙂。 \ No newline at end of file diff --git a/docs/source/zh/optimization/deepcache.md b/docs/source/zh/optimization/deepcache.md new file mode 100644 index 0000000000..4f19d4a365 --- /dev/null +++ b/docs/source/zh/optimization/deepcache.md @@ -0,0 +1,59 @@ + + +# DeepCache +[DeepCache](https://huggingface.co/papers/2312.00858) 
通过策略性地缓存和重用高级特征,同时利用 U-Net 架构高效更新低级特征,来加速 [`StableDiffusionPipeline`] 和 [`StableDiffusionXLPipeline`]。 + +首先安装 [DeepCache](https://github.com/horseee/DeepCache): +```bash +pip install DeepCache +``` + +然后加载并启用 [`DeepCacheSDHelper`](https://github.com/horseee/DeepCache#usage): + +```diff + import torch + from diffusers import StableDiffusionPipeline + pipe = StableDiffusionPipeline.from_pretrained('stable-diffusion-v1-5/stable-diffusion-v1-5', torch_dtype=torch.float16).to("cuda") + ++ from DeepCache import DeepCacheSDHelper ++ helper = DeepCacheSDHelper(pipe=pipe) ++ helper.set_params( ++ cache_interval=3, ++ cache_branch_id=0, ++ ) ++ helper.enable() + + image = pipe("a photo of an astronaut on a moon").images[0] +``` + +`set_params` 方法接受两个参数:`cache_interval` 和 `cache_branch_id`。`cache_interval` 表示特征缓存的频率,指定为每次缓存操作之间的步数。`cache_branch_id` 标识网络的哪个分支(从最浅层到最深层排序)负责执行缓存过程。 +选择较低的 `cache_branch_id` 或较大的 `cache_interval` 可以加快推理速度,但会降低图像质量(这些超参数的消融实验可以在[论文](https://huggingface.co/papers/2312.00858)中找到)。一旦设置了这些参数,使用 `enable` 或 `disable` 方法来激活或停用 `DeepCacheSDHelper`。 + +
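+
+下面是一个最小的使用示意,演示如何在同一个管道上切换不同的缓存超参数(沿用上文的 `pipe` 和 `helper` 变量,具体数值仅作演示,并非推荐配置):
+
+```py
+# 先停用当前缓存,再用更激进的设置重新启用(加速更多,质量可能下降)
+helper.disable()
+helper.set_params(
+    cache_interval=5,
+    cache_branch_id=0,
+)
+helper.enable()
+image_fast = pipe("a photo of an astronaut on a moon").images[0]
+
+# 实验结束后停用缓存,恢复原始管道行为
+helper.disable()
+```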
+ +
+ +您可以在 [WandB 报告](https://wandb.ai/horseee/DeepCache/runs/jwlsqqgt?workspace=user-horseee) 中找到更多生成的样本(原始管道 vs DeepCache)和相应的推理延迟。提示是从 [MS-COCO 2017](https://cocodataset.org/#home) 数据集中随机选择的。 + +## 基准测试 + +我们在 NVIDIA RTX A5000 上测试了 DeepCache 使用 50 个推理步骤加速 [Stable Diffusion v2.1](https://huggingface.co/stabilityai/stable-diffusion-2-1) 的速度,使用不同的配置,包括分辨率、批处理大小、缓存间隔(I)和缓存分支(B)。 + +| **分辨率** | **批次大小** | **原始** | **DeepCache(I=3, B=0)** | **DeepCache(I=5, B=0)** | **DeepCache(I=5, B=1)** | +|----------------|----------------|--------------|-------------------------|-------------------------|-------------------------| +| 512| 8| 15.96| 6.88(2.32倍)| 5.03(3.18倍)| 7.27(2.20x)| +| | 4| 8.39| 3.60(2.33倍)| 2.62(3.21倍)| 3.75(2.24x)| +| | 1| 2.61| 1.12(2.33倍)| 0.81(3.24倍)| 1.11(2.35x)| +| 768| 8| 43.58| 18.99(2.29倍)| 13.96(3.12倍)| 21.27(2.05x)| +| | 4| 22.24| 9.67(2.30倍)| 7.10(3.13倍)| 10.74(2.07x)| +| | 1| 6.33| 2.72(2.33倍)| 1.97(3.21倍)| 2.98(2.12x)| +| 1024| 8| 101.95| 45.57(2.24倍)| 33.72(3.02倍)| 53.00(1.92x)| +| | 4| 49.25| 21.86(2.25倍)| 16.19(3.04倍)| 25.78(1.91x)| +| | 1| 13.83| 6.07(2.28倍)| 4.43(3.12倍)| 7.15(1.93x)| \ No newline at end of file diff --git a/docs/source/zh/optimization/habana.md b/docs/source/zh/optimization/habana.md new file mode 100644 index 0000000000..9b15847d63 --- /dev/null +++ b/docs/source/zh/optimization/habana.md @@ -0,0 +1,28 @@ + + +# Intel Gaudi + +Intel Gaudi AI 加速器系列包括 [Intel Gaudi 1](https://habana.ai/products/gaudi/)、[Intel Gaudi 2](https://habana.ai/products/gaudi2/) 和 [Intel Gaudi 3](https://habana.ai/products/gaudi3/)。每台服务器配备 8 个设备,称为 Habana 处理单元 (HPU),在 Gaudi 3 上提供 128GB 内存,在 Gaudi 2 上提供 96GB 内存,在第一代 Gaudi 上提供 32GB 内存。有关底层硬件架构的更多详细信息,请查看 [Gaudi 架构](https://docs.habana.ai/en/latest/Gaudi_Overview/Gaudi_Architecture.html) 概述。 + +Diffusers 管道可以利用 HPU 加速,即使管道尚未添加到 [Optimum for Intel Gaudi](https://huggingface.co/docs/optimum/main/en/habana/index),也可以通过 [GPU 迁移工具包](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Model_Porting/GPU_Migration_Toolkit/GPU_Migration_Toolkit.html) 实现。 + +在您的管道上调用 `.to("hpu")` 以将其移动到 HPU 设备,如下所示为 Flux 示例: +```py +import torch +from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16) +pipeline.to("hpu") + +image = pipeline("一张松鼠在毕加索风格中的图像").images[0] +``` + +> [!TIP] +> 对于 Gaudi 优化的扩散管道实现,我们推荐使用 [Optimum for Intel Gaudi](https://huggingface.co/docs/optimum/main/en/habana/index)。 \ No newline at end of file diff --git a/docs/source/zh/optimization/memory.md b/docs/source/zh/optimization/memory.md new file mode 100644 index 0000000000..662dcaf4bc --- /dev/null +++ b/docs/source/zh/optimization/memory.md @@ -0,0 +1,581 @@ + + +# 减少内存使用 + +现代diffusion models,如 [Flux](../api/pipelines/flux) 和 [Wan](../api/pipelines/wan),拥有数十亿参数,在您的硬件上进行推理时会占用大量内存。这是一个挑战,因为常见的 GPU 通常没有足够的内存。为了克服内存限制,您可以使用多个 GPU(如果可用)、将一些管道组件卸载到 CPU 等。 + +本指南将展示如何减少内存使用。 + +> [!TIP] +> 请记住,这些技术可能需要根据模型进行调整。例如,基于 transformer 的扩散模型可能不会像基于 UNet 的模型那样从这些内存优化中同等受益。 + +## 多个 GPU + +如果您有多个 GPU 的访问权限,有几种选项可以高效地在硬件上加载和分发大型模型。这些功能由 [Accelerate](https://huggingface.co/docs/accelerate/index) 库支持,因此请确保先安装它。 + +```bash +pip install -U accelerate +``` + +### 分片检查点 + +将大型检查点加载到多个分片中很有用,因为分片会逐个加载。这保持了低内存使用,只需要足够的内存来容纳模型大小和最大分片大小。我们建议当 fp32 检查点大于 5GB 时进行分片。默认分片大小为 5GB。 + +在 [`~DiffusionPipeline.save_pretrained`] 中使用 `max_shard_size` 参数对检查点进行分片。 + +```py +from diffusers import AutoModel + +unet = AutoModel.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet" +) 
+unet.save_pretrained("sdxl-unet-sharded", max_shard_size="5GB") +``` + +现在您可以使用分片检查点,而不是常规检查点,以节省内存。 + +```py +import torch +from diffusers import AutoModel, StableDiffusionXLPipeline + +unet = AutoModel.from_pretrained( + "username/sdxl-unet-sharded", torch_dtype=torch.float16 +) +pipeline = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + unet=unet, + torch_dtype=torch.float16 +).to("cuda") +``` + +### 设备放置 + +> [!WARNING] +> 设备放置是一个实验性功能,API 可能会更改。目前仅支持 `balanced` 策略。我们计划在未来支持额外的映射策略。 + +`device_map` 参数控制管道或模型中的组件如何 +单个模型中的层分布在多个设备上。 + + + + +`balanced` 设备放置策略将管道均匀分割到所有可用设备上。 + +```py +import torch +from diffusers import AutoModel, StableDiffusionXLPipeline + +pipeline = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + torch_dtype=torch.float16, + device_map="balanced" +) +``` + +您可以使用 `hf_device_map` 检查管道的设备映射。 + +```py +print(pipeline.hf_device_map) +{'unet': 1, 'vae': 1, 'safety_checker': 0, 'text_encoder': 0} +``` + + + + +`device_map` 对于加载大型模型非常有用,例如具有 125 亿参数的 Flux diffusion transformer。将其设置为 `"auto"` 可以自动将模型首先分布到最快的设备上,然后再移动到较慢的设备。有关更多详细信息,请参阅 [模型分片](../training/distributed_inference#model-sharding) 文档。 + +```py +import torch +from diffusers import AutoModel + +transformer = AutoModel.from_pretrained( + "black-forest-labs/FLUX.1-dev", + subfolder="transformer", + device_map="auto", + torch_dtype=torch.bfloat16 +) +``` + +您可以使用 `hf_device_map` 检查模型的设备映射。 + +```py +print(transformer.hf_device_map) +``` + + + + +当设计您自己的 `device_map` 时,它应该是一个字典,包含模型的特定模块名称或层以及设备标识符(整数表示 GPU,`cpu` 表示 CPU,`disk` 表示磁盘)。 + +在模型上调用 `hf_device_map` 以查看模型层如何分布,然后设计您自己的映射。 + +```py +print(transformer.hf_device_map) +{'pos_embed': 0, 'time_text_embed': 0, 'context_embedder': 0, 'x_embedder': 0, 'transformer_blocks': 0, 'single_transformer_blocks.0': 0, 'single_transformer_blocks.1': 0, 'single_transformer_blocks.2': 0, 'single_transformer_blocks.3': 0, 'single_transformer_blocks.4': 0, 'single_transformer_blocks.5': 0, 'single_transformer_blocks.6': 0, 'single_transformer_blocks.7': 0, 'single_transformer_blocks.8': 0, 'single_transformer_blocks.9': 0, 'single_transformer_blocks.10': 'cpu', 'single_transformer_blocks.11': 'cpu', 'single_transformer_blocks.12': 'cpu', 'single_transformer_blocks.13': 'cpu', 'single_transformer_blocks.14': 'cpu', 'single_transformer_blocks.15': 'cpu', 'single_transformer_blocks.16': 'cpu', 'single_transformer_blocks.17': 'cpu', 'single_transformer_blocks.18': 'cpu', 'single_transformer_blocks.19': 'cpu', 'single_transformer_blocks.20': 'cpu', 'single_transformer_blocks.21': 'cpu', 'single_transformer_blocks.22': 'cpu', 'single_transformer_blocks.23': 'cpu', 'single_transformer_blocks.24': 'cpu', 'single_transformer_blocks.25': 'cpu', 'single_transformer_blocks.26': 'cpu', 'single_transformer_blocks.27': 'cpu', 'single_transformer_blocks.28': 'cpu', 'single_transformer_blocks.29': 'cpu', 'single_transformer_blocks.30': 'cpu', 'single_transformer_blocks.31': 'cpu', 'single_transformer_blocks.32': 'cpu', 'single_transformer_blocks.33': 'cpu', 'single_transformer_blocks.34': 'cpu', 'single_transformer_blocks.35': 'cpu', 'single_transformer_blocks.36': 'cpu', 'single_transformer_blocks.37': 'cpu', 'norm_out': 'cpu', 'proj_out': 'cpu'} +``` + +例如,下面的 `device_map` 将 `single_transformer_blocks.10` 到 `single_transformer_blocks.20` 放置在第二个 GPU(`1`)上。 + +```py +import torch +from diffusers import AutoModel + +device_map = { + 'pos_embed': 0, 'time_text_embed': 0, 'context_embedder': 0, 'x_embedder': 0, 
'transformer_blocks': 0, 'single_transformer_blocks.0': 0, 'single_transformer_blocks.1': 0, 'single_transformer_blocks.2': 0, 'single_transformer_blocks.3': 0, 'single_transformer_blocks.4': 0, 'single_transformer_blocks.5': 0, 'single_transformer_blocks.6': 0, 'single_transformer_blocks.7': 0, 'single_transformer_blocks.8': 0, 'single_transformer_blocks.9': 0, 'single_transformer_blocks.10': 1, 'single_transformer_blocks.11': 1, 'single_transformer_blocks.12': 1, 'single_transformer_blocks.13': 1, 'single_transformer_blocks.14': 1, 'single_transformer_blocks.15': 1, 'single_transformer_blocks.16': 1, 'single_transformer_blocks.17': 1, 'single_transformer_blocks.18': 1, 'single_transformer_blocks.19': 1, 'single_transformer_blocks.20': 1, 'single_transformer_blocks.21': 'cpu', 'single_transformer_blocks.22': 'cpu', 'single_transformer_blocks.23': 'cpu', 'single_transformer_blocks.24': 'cpu', 'single_transformer_blocks.25': 'cpu', 'single_transformer_blocks.26': 'cpu', 'single_transformer_blocks.27': 'cpu', 'single_transformer_blocks.28': 'cpu', 'single_transformer_blocks.29': 'cpu', 'single_transformer_blocks.30': 'cpu', 'single_transformer_blocks.31': 'cpu', 'single_transformer_blocks.32': 'cpu', 'single_transformer_blocks.33': 'cpu', 'single_transformer_blocks.34': 'cpu', 'single_transformer_blocks.35': 'cpu', 'single_transformer_blocks.36': 'cpu', 'single_transformer_blocks.37': 'cpu', 'norm_out': 'cpu', 'proj_out': 'cpu' +} + +transformer = AutoModel.from_pretrained( + "black-forest-labs/FLUX.1-dev", + subfolder="transformer", + device_map=device_map, + torch_dtype=torch.bfloat16 +) +``` + +传递一个字典,将最大内存使用量映射到每个设备以强制执行限制。如果设备不在 `max_memory` 中,它将被忽略,管道组件不会分发到该设备。 + +```py +import torch +from diffusers import AutoModel, StableDiffusionXLPipeline + +max_memory = {0:"1GB", 1:"1GB"} +pipeline = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + torch_dtype=torch.float16, + device_map="balanced", + max_memory=max_memory +) +``` + +Diffusers 默认使用所有设备的最大内存,但如果它们无法适应 GPU,则需要使用单个 GPU 并通过以下方法卸载到 CPU。 + +- [`~DiffusionPipeline.enable_model_cpu_offload`] 仅适用于单个 GPU,但非常大的模型可能无法适应它 +- 使用 [`~DiffusionPipeline.enable_sequential_cpu_offload`] 可能有效,但它极其缓慢,并且仅限于单个 GPU。 + +使用 [`~DiffusionPipeline.reset_device_map`] 方法来重置 `device_map`。如果您想在已进行设备映射的管道上使用方法如 `.to()`、[`~DiffusionPipeline.enable_sequential_cpu_offload`] 和 [`~DiffusionPipeline.enable_model_cpu_offload`],这是必要的。 + +```py +pipeline.reset_device_map() +``` + +## VAE 切片 + +VAE 切片通过将大批次输入拆分为单个数据批次并分别处理它们来节省内存。这种方法在同时生成多个图像时效果最佳。 + +例如,如果您同时生成 4 个图像,解码会将峰值激活内存增加 4 倍。VAE 切片通过一次只解码 1 个图像而不是所有 4 个图像来减少这种情况。 + +调用 [`~StableDiffusionPipeline.enable_vae_slicing`] 来启用切片 VAE。您可以预期在解码多图像批次时性能会有小幅提升,而在单图像批次时没有性能影响。 + +```py +import torch +from diffusers import AutoModel, StableDiffusionXLPipeline + +pipeline = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + torch_dtype=torch.float16, +).to("cuda") +pipeline.enable_vae_slicing() +pipeline(["An astronaut riding a horse on Mars"]*32).images[0] +print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") +``` + +> [!WARNING] +> [`AutoencoderKLWan`] 和 [`AsymmetricAutoencoderKL`] 类不支持切片。 + +## VAE 平铺 + +VAE 平铺通过将图像划分为较小的重叠图块而不是一次性处理整个图像来节省内存。这也减少了峰值内存使用量,因为 GPU 一次只处理一个图块。 + +调用 [`~StableDiffusionPipeline.enable_vae_tiling`] 来启用 VAE 平铺。生成的图像可能因图块到图块的色调变化而有所不同,因为它们被单独解码,但图块之间不应有明显的接缝。对于低于预设(但可配置)限制的分辨率,平铺被禁用。例如,对于 [`StableDiffusionPipeline`] 中的 VAE,此限制为 512x512。 + +```py +import torch +from diffusers import 
AutoPipelineForImage2Image +from diffusers.utils import load_image + +pipeline = AutoPipelineForImage2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 +).to("cuda") +pipeline.enable_vae_tiling() + +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdxl-init.png") +prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" +pipeline(prompt, image=init_image, strength=0.5).images[0] +print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") +``` + +> [!WARNING] +> [`AutoencoderKLWan`] 和 [`AsymmetricAutoencoderKL`] 不支持平铺。 + +## 卸载 + +卸载策略将非当前活动层移动 +将模型移动到 CPU 以避免增加 GPU 内存。这些策略可以与量化和 torch.compile 结合使用,以平衡推理速度和内存使用。 + +有关更多详细信息,请参考 [编译和卸载量化模型](./speed-memory-optims) 指南。 + +### CPU 卸载 + +CPU 卸载选择性地将权重从 GPU 移动到 CPU。当需要某个组件时,它被传输到 GPU;当不需要时,它被移动到 CPU。此方法作用于子模块而非整个模型。它通过避免将整个模型存储在 GPU 上来节省内存。 + +CPU 卸载显著减少内存使用,但由于子模块在设备之间多次来回传递,它也非常慢。由于速度极慢,它通常不实用。 + +> [!WARNING] +> 在调用 [`~DiffusionPipeline.enable_sequential_cpu_offload`] 之前,不要将管道移动到 CUDA,否则节省的内存非常有限(更多细节请参考此 [issue](https://github.com/huggingface/diffusers/issues/1934))。这是一个状态操作,会在模型上安装钩子。 + +调用 [`~DiffusionPipeline.enable_sequential_cpu_offload`] 以在管道上启用它。 + +```py +import torch +from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16 +) +pipeline.enable_sequential_cpu_offload() + +pipeline( + prompt="An astronaut riding a horse on Mars", + guidance_scale=0., + height=768, + width=1360, + num_inference_steps=4, + max_sequence_length=256, +).images[0] +print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") +``` + +### 模型卸载 + +模型卸载将整个模型移动到 GPU,而不是选择性地移动某些层或模型组件。一个主要管道模型,通常是文本编码器、UNet 和 VAE,被放置在 GPU 上,而其他组件保持在 CPU 上。像 UNet 这样运行多次的组件会一直留在 GPU 上,直到完全完成且不再需要。这消除了 [CPU 卸载](#cpu-offloading) 的通信开销,使模型卸载成为一个更快的替代方案。权衡是内存节省不会那么大。 + +> [!WARNING] +> 请注意,如果在安装钩子后模型在管道外部被重用(更多细节请参考 [移除钩子](https://huggingface.co/docs/accelerate/en/package_reference/big_modeling#accelerate.hooks.remove_hook_from_module)),您需要按预期顺序运行整个管道和模型以正确卸载它们。这是一个状态操作,会在模型上安装钩子。 + +调用 [`~DiffusionPipeline.enable_model_cpu_offload`] 以在管道上启用它。 + +```py +import torch +from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16 +) +pipeline.enable_model_cpu_offload() + +pipeline( + prompt="An astronaut riding a horse on Mars", + guidance_scale=0., + height=768, + width=1360, + num_inference_steps=4, + max_sequence_length=256, +).images[0] +print(f"最大内存保留: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") +``` + +[`~DiffusionPipeline.enable_model_cpu_offload`] 在您单独使用 [`~StableDiffusionXLPipeline.encode_prompt`] 方法生成文本编码器隐藏状态时也有帮助。 + +### 组卸载 + +组卸载将内部层组([torch.nn.ModuleList](https://pytorch.org/docs/stable/generated/torch.nn.ModuleList.html) 或 [torch.nn.Sequential](https://pytorch.org/docs/stable/generated/torch.nn.Sequential.html))移动到 CPU。它比[模型卸载](#model-offloading)使用更少的内存,并且比[CPU 卸载](#cpu-offloading)更快,因为它减少了通信开销。 + +> [!WARNING] +> 如果前向实现包含权重相关的输入设备转换,组卸载可能不适用于所有模型,因为它可能与组卸载的设备转换机制冲突。 + +调用 [`~ModelMixin.enable_group_offload`] 为继承自 [`ModelMixin`] 的标准 Diffusers 模型组件启用它。对于不继承自 [`ModelMixin`] 的其他模型组件,例如通用 [torch.nn.Module](https://pytorch.org/docs/stable/generated/torch.nn.Module.html),使用 [`~hooks.apply_group_offloading`] 代替。 + +`offload_type` 参数可以设置为 `block_level` 或 `leaf_level`。 + +- `block_level` 基于 
`num_blocks_per_group` 参数卸载层组。例如,如果 `num_blocks_per_group=2` 在一个有 40 层的模型上,每次加载和卸载 2 层(总共 20 次加载/卸载)。这大大减少了内存需求。 +- `leaf_level` 在最低级别卸载单个层,等同于[CPU 卸载](#cpu-offloading)。但如果您使用流而不放弃推理速度,它可以更快。 + +```py +import torch +from diffusers import CogVideoXPipeline +from diffusers.hooks import apply_group_offloading +from diffusers.utils import export_to_video + +onload_device = torch.device("cuda") +offload_device = torch.device("cpu") +pipeline = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16) + +# 对 Diffusers 模型实现使用 enable_group_offload 方法 +pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level") +pipeline.vae.enable_group_offload(onload_device=onload_device, offload_type="leaf_level") + +# 对其他模型组件使用 apply_group_offloading 方法 +apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="block_level", num_blocks_per_group=2) + +prompt = ( +"A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. " + "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other " + "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, " + "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. " + "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical " + "atmosphere of this unique musical performance." +) +video = pipeline(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0] +print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") +export_to_video(video, "output.mp4", fps=8) +``` + +#### CUDA 流 +`use_stream` 参数可以激活支持异步数据传输流的 CUDA 设备,以减少整体执行时间,与 [CPU 卸载](#cpu-offloading) 相比。它通过使用层预取重叠数据传输和计算。下一个要执行的层在当前层仍在执行时加载到 GPU 上。这会显著增加 CPU 内存,因此请确保您有模型大小的 2 倍内存。 + +设置 `record_stream=True` 以获得更多速度提升,代价是内存使用量略有增加。请参阅 [torch.Tensor.record_stream](https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html) 文档了解更多信息。 + +> [!TIP] +> 当 `use_stream=True` 在启用平铺的 VAEs 上时,确保在推理前进行虚拟前向传递(可以使用虚拟输入),以避免设备不匹配错误。这可能不适用于所有实现,因此如果遇到任何问题,请随时提出问题。 + +如果您在使用启用 `use_stream` 的 `block_level` 组卸载,`num_blocks_per_group` 参数应设置为 `1`,否则会引发警告。 + +```py +pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", use_stream=True, record_stream=True) +``` + +`low_cpu_mem_usage` 参数可以设置为 `True`,以在使用流进行组卸载时减少 CPU 内存使用。它最适合 `leaf_level` 卸载和 CPU 内存瓶颈的情况。通过动态创建固定张量而不是预先固定它们来节省内存。然而,这可能会增加整体执行时间。 + +#### 卸载到磁盘 +组卸载可能会消耗大量系统内存,具体取决于模型大小。在内存有限的系统上,尝试将组卸载到磁盘作为辅助内存。 + +在 [`~ModelMixin.enable_group_offload`] 或 [`~hooks.apply_group_offloading`] 中设置 `offload_to_disk_path` 参数,将模型卸载到磁盘。 + +```py +pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", offload_to_disk_path="path/to/disk") + +apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="block_level", num_blocks_per_group=2, offload_to_disk_path="path/to/disk") +``` + +参考这些[两个](https://github.com/huggingface/diffusers/pull/11682#issue-3129365363)[表格](https://github.com/huggingface/diffusers/pull/11682#issuecomment-2955715126)来比较速度和内存的权衡。 + +## 分层类型转换 + +> [!TIP] +> 将分层类型转换与[组卸载](#group-offloading)结合使用,以获得更多内存节省。 + +分层类型转换将权重存储在较小的数据格式中(例如 `torch.float8_e4m3fn` 和 
`torch.float8_e5m2`),以使用更少的内存,并在计算时将那些权重上转换为更高精度如 `torch.float16` 或 `torch.bfloat16`。某些层(归一化和调制相关权重)被跳过,因为将它们存储在 fp8 中可能会降低生成质量。 + +> [!WARNING] +> 如果前向实现包含权重的内部类型转换,分层类型转换可能不适用于所有模型。当前的分层类型转换实现假设前向传递独立于权重精度,并且输入数据类型始终在 `compute_dtype` 中指定(请参见[这里](https://github.com/huggingface/transformers/blob/7f5077e53682ca855afc826162b204ebf809f1f9/src/transformers/models/t5/modeling_t5.py#L294-L299)以获取不兼容的实现)。 +> +> 分层类型转换也可能在使用[PEFT](https://huggingface.co/docs/peft/index)层的自定义建模实现上失败。有一些检查可用,但它们没有经过广泛测试或保证在所有情况下都能工作。 + +调用 [`~ModelMixin.enable_layerwise_casting`] 来设置存储和计算数据类型。 + +```py +import torch +from diffusers import CogVideoXPipeline, CogVideoXTransformer3DModel +from diffusers.utils import export_to_video + +transformer = CogVideoXTransformer3DModel.from_pretrained( + "THUDM/CogVideoX-5b", + subfolder="transformer", + torch_dtype=torch.bfloat16 +) +transformer.enable_layerwise_casting(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16) + +pipeline = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", + transformer=transformer, + torch_dtype=torch.bfloat16 +).to("cuda") +prompt = ( + "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. " + "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other " + "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, " + "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. " + "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical " + "atmosphere of this unique musical performance." +) +video = pipeline(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0] +print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") +export_to_video(video, "output.mp4", fps=8) +``` + +[`~hooks.apply_layerwise_casting`] 方法也可以在您需要更多控制和灵活性时使用。它可以通过在特定内部模块上调用它来部分应用于模型层。使用 `skip_modules_pattern` 或 `skip_modules_classes` 参数来指定要避免的模块,例如归一化和调制层。 + +```python +import torch +from diffusers import CogVideoXTransformer3DModel +from diffusers.hooks import apply_layerwise_casting + +transformer = CogVideoXTransformer3DModel.from_pretrained( + "THUDM/CogVideoX-5b", + subfolder="transformer", + torch_dtype=torch.bfloat16 +) + +# 跳过归一化层 +apply_layerwise_casting( + transformer, + storage_dtype=torch.float8_e4m3fn, + compute_dtype=torch.bfloat16, + skip_modules_classes=["norm"], + non_blocking=True, +) +``` + +## torch.channels_last + +[torch.channels_last](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) 将张量的存储方式从 `(批次大小, 通道数, 高度, 宽度)` 翻转为 `(批次大小, 高度, 宽度, 通道数)`。这使张量与硬件如何顺序访问存储在内存中的张量对齐,并避免了在内存中跳转以访问像素值。 + +并非所有运算符当前都支持通道最后格式,并且可能导致性能更差,但仍然值得尝试。 + +```py +print(pipeline.unet.conv_out.state_dict()["weight"].stride()) # (2880, 9, 3, 1) +pipeline.unet.to(memory_format=torch.channels_last) # 原地操作 +print( + pipeline.unet.conv_out.state_dict()["weight"].stride() +) # (2880, 1, 960, 320) 第二个维度的跨度为1证明它有效 +``` + +## torch.jit.trace + +[torch.jit.trace](https://pytorch.org/docs/stable/generated/torch.jit.trace.html) 记录模型在样本输入上执行的操作,并根据记录的执行路径创建一个新的、优化的模型表示。在跟踪过程中,模型被优化以减少来自Python和动态控制流的开销,并且操作被融合在一起以提高效率。返回的可执行文件或 [ScriptFunction](https://pytorch.org/docs/stable/generated/torch.jit.ScriptFunction.html) 可以被编译。 + +```py +import time +import torch +from diffusers import StableDiffusionPipeline +import functools + +# torch 禁用梯度 +torch.set_grad_enabled(False) + 
+# 设置变量 +n_experiments = 2 +unet_runs_per_experiment = 50 + +# 加载样本输入 +def generate_inputs(): + sample = torch.randn((2, 4, 64, 64), device="cuda", dtype=torch.float16) + timestep = torch.rand(1, device="cuda", dtype=torch.float16) * 999 + encoder_hidden_states = torch.randn((2, 77, 768), device="cuda", dtype=torch.float16) + return sample, timestep, encoder_hidden_states + + +pipeline = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + torch_dtype=torch.float16, + use_safetensors=True, +).to("cuda") +unet = pipeline.unet +unet.eval() +unet.to(memory +_format=torch.channels_last) # 使用 channels_last 内存格式 +unet.forward = functools.partial(unet.forward, return_dict=False) # 设置 return_dict=False 为默认 + +# 预热 +for _ in range(3): + with torch.inference_mode(): + inputs = generate_inputs() + orig_output = unet(*inputs) + +# 追踪 +print("tracing..") +unet_traced = torch.jit.trace(unet, inputs) +unet_traced.eval() +print("done tracing") + +# 预热和优化图 +for _ in range(5): + with torch.inference_mode(): + inputs = generate_inputs() + orig_output = unet_traced(*inputs) + +# 基准测试 +with torch.inference_mode(): + for _ in range(n_experiments): + torch.cuda.synchronize() + start_time = time.time() + for _ in range(unet_runs_per_experiment): + orig_output = unet_traced(*inputs) + torch.cuda.synchronize() + print(f"unet traced inference took {time.time() - start_time:.2f} seconds") + for _ in range(n_experiments): + torch.cuda.synchronize() + start_time = time.time() + for _ in range(unet_runs_per_experiment): + orig_output = unet(*inputs) + torch.cuda.synchronize() + print(f"unet inference took {time.time() - start_time:.2f} seconds") + +# 保存模型 +unet_traced.save("unet_traced.pt") +``` + +替换管道的 UNet 为追踪版本。 + +```py +import torch +from diffusers import StableDiffusionPipeline +from dataclasses import dataclass + +@dataclass +class UNet2DConditionOutput: + sample: torch.Tensor + +pipeline = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + torch_dtype=torch.float16, + use_safetensors=True, +).to("cuda") + +# 使用 jitted unet +unet_traced = torch.jit.load("unet_traced.pt") + +# del pipeline.unet +class TracedUNet(torch.nn.Module): + def __init__(self): + super().__init__() + self.in_channels = pipe.unet.config.in_channels + self.device = pipe.unet.device + + def forward(self, latent_model_input, t, encoder_hidden_states): + sample = unet_traced(latent_model_input, t, encoder_hidden_states)[0] + return UNet2DConditionOutput(sample=sample) + +pipeline.unet = TracedUNet() + +with torch.inference_mode(): + image = pipe([prompt] * 1, num_inference_steps=50).images[0] +``` + +## 内存高效注意力 + +> [!TIP] +> 内存高效注意力优化内存使用 *和* [推理速度](./fp16#scaled-dot-product-attention)! 
+ +Transformers 注意力机制是内存密集型的,尤其对于长序列,因此您可以尝试使用不同且更内存高效的注意力类型。 + +默认情况下,如果安装了 PyTorch >= 2.0,则使用 [scaled dot-product attention (SDPA)](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)。您无需对代码进行任何额外更改。 + +SDPA 还支持 [FlashAttention](https://github.com/Dao-AILab/flash-attention) 和 [xFormers](https://github.com/facebookresearch/xformers),以及 a +这是一个原生的 C++ PyTorch 实现。它会根据您的输入自动选择最优的实现。 + +您可以使用 [`~ModelMixin.enable_xformers_memory_efficient_attention`] 方法显式地使用 xFormers。 + +```py +# pip install xformers +import torch +from diffusers import StableDiffusionXLPipeline + +pipeline = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + torch_dtype=torch.float16, +).to("cuda") +pipeline.enable_xformers_memory_efficient_attention() +``` + +调用 [`~ModelMixin.disable_xformers_memory_efficient_attention`] 来禁用它。 + +```py +pipeline.disable_xformers_memory_efficient_attention() +``` \ No newline at end of file diff --git a/docs/source/zh/optimization/mps.md b/docs/source/zh/optimization/mps.md new file mode 100644 index 0000000000..c76a475336 --- /dev/null +++ b/docs/source/zh/optimization/mps.md @@ -0,0 +1,82 @@ + + +# Metal Performance Shaders (MPS) + +> [!TIP] +> 带有 MPS 徽章的管道表示模型可以利用 Apple silicon 设备上的 MPS 后端进行更快的推理。欢迎提交 [Pull Request](https://github.com/huggingface/diffusers/compare) 来为缺少此徽章的管道添加它。 + +🤗 Diffusers 与 Apple silicon(M1/M2 芯片)兼容,使用 PyTorch 的 [`mps`](https://pytorch.org/docs/stable/notes/mps.html) 设备,该设备利用 Metal 框架来发挥 MacOS 设备上 GPU 的性能。您需要具备: + +- 配备 Apple silicon(M1/M2)硬件的 macOS 计算机 +- macOS 12.6 或更高版本(推荐 13.0 或更高) +- arm64 版本的 Python +- [PyTorch 2.0](https://pytorch.org/get-started/locally/)(推荐)或 1.13(支持 `mps` 的最低版本) + +`mps` 后端使用 PyTorch 的 `.to()` 接口将 Stable Diffusion 管道移动到您的 M1 或 M2 设备上: + +```python +from diffusers import DiffusionPipeline + +pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") +pipe = pipe.to("mps") + +# 如果您的计算机内存小于 64 GB,推荐使用 +pipe.enable_attention_slicing() + +prompt = "a photo of an astronaut riding a horse on mars" +image = pipe(prompt).images[0] +image +``` + + + +PyTorch [mps](https://pytorch.org/docs/stable/notes/mps.html) 后端不支持大小超过 `2**32` 的 NDArray。如果您遇到此问题,请提交 [Issue](https://github.com/huggingface/diffusers/issues/new/choose) 以便我们调查。 + + + +如果您使用 **PyTorch 1.13**,您需要通过管道进行一次额外的"预热"传递。这是一个临时解决方法,用于解决首次推理传递产生的结果与后续传递略有不同的问题。您只需要执行此传递一次,并且在仅进行一次推理步骤后可以丢弃结果。 + +```diff + from diffusers import DiffusionPipeline + + pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5").to("mps") + pipe.enable_attention_slicing() + + prompt = "a photo of an astronaut riding a horse on mars" + # 如果 PyTorch 版本是 1.13,进行首次"预热"传递 ++ _ = pipe(prompt, num_inference_steps=1) + + # 预热传递后,结果与 CPU 设备上的结果匹配。 + image = pipe(prompt).images[0] +``` + +## 故障排除 + +本节列出了使用 `mps` 后端时的一些常见问题及其解决方法。 + +### 注意力切片 + +M1/M2 性能对内存压力非常敏感。当发生这种情况时,系统会自动交换内存,这会显著降低性能。 + +为了防止这种情况发生,我们建议使用*注意力切片*来减少推理过程中的内存压力并防止交换。这在您的计算机系统内存少于 64GB 或生成非标准分辨率(大于 512×512 像素)的图像时尤其相关。在您的管道上调用 [`~DiffusionPipeline.enable_attention_slicing`] 函数: + +```py +from diffusers import DiffusionPipeline +import torch + +pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("mps") +pipeline.enable_attention_slicing() +``` + +注意力切片将昂贵的注意力操作分多个步骤执行,而不是一次性完成。在没有统一内存的计算机中,它通常能提高约 20% 的性能,但我们观察到在大多数 Apple 芯片计算机中,除非您有 64GB 或更多 RAM,否则性能会*更好*。 + +### 批量推理 + 
+批量生成多个提示可能会导致崩溃或无法可靠工作。如果是这种情况,请尝试迭代而不是批量处理。 \ No newline at end of file diff --git a/docs/source/zh/optimization/neuron.md b/docs/source/zh/optimization/neuron.md new file mode 100644 index 0000000000..709404d56b --- /dev/null +++ b/docs/source/zh/optimization/neuron.md @@ -0,0 +1,59 @@ + + +# AWS Neuron + +Diffusers 功能可在 [AWS Inf2 实例](https://aws.amazon.com/ec2/instance-types/inf2/)上使用,这些是由 [Neuron 机器学习加速器](https://aws.amazon.com/machine-learning/inferentia/)驱动的 EC2 实例。这些实例旨在提供更好的计算性能(更高的吞吐量、更低的延迟)和良好的成本效益,使其成为 AWS 用户将扩散模型部署到生产环境的良好选择。 + +[Optimum Neuron](https://huggingface.co/docs/optimum-neuron/en/index) 是 Hugging Face 库与 AWS 加速器之间的接口,包括 AWS [Trainium](https://aws.amazon.com/machine-learning/trainium/) 和 AWS [Inferentia](https://aws.amazon.com/machine-learning/inferentia/)。它支持 Diffusers 中的许多功能,并具有类似的 API,因此如果您已经熟悉 Diffusers,学习起来更容易。一旦您创建了 AWS Inf2 实例,请安装 Optimum Neuron。 + +```bash +python -m pip install --upgrade-strategy eager optimum[neuronx] +``` + + + +我们提供预构建的 [Hugging Face Neuron 深度学习 AMI](https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2)(DLAMI)和用于 Amazon SageMaker 的 Optimum Neuron 容器。建议正确设置您的环境。 + + + +下面的示例演示了如何在 inf2.8xlarge 实例上使用 Stable Diffusion XL 模型生成图像(一旦模型编译完成,您可以切换到更便宜的 inf2.xlarge 实例)。要生成一些图像,请使用 [`~optimum.neuron.NeuronStableDiffusionXLPipeline`] 类,该类类似于 Diffusers 中的 [`StableDiffusionXLPipeline`] 类。 + +与 Diffusers 不同,您需要将管道中的模型编译为 Neuron 格式,即 `.neuron`。运行以下命令将模型导出为 `.neuron` 格式。 + +```bash +optimum-cli export neuron --model stabilityai/stable-diffusion-xl-base-1.0 \ + --batch_size 1 \ + --height 1024 `# 生成图像的高度(像素),例如 768, 1024` \ + --width 1024 `# 生成图像的宽度(像素),例如 768, 1024` \ + --num_images_per_prompt 1 `# 每个提示生成的图像数量,默认为 1` \ + --auto_cast matmul `# 仅转换矩阵乘法操作` \ + --auto_cast_type bf16 `# 将操作从 FP32 转换为 BF16` \ + sd_neuron_xl/ +``` + +现在使用预编译的 SDXL 模型生成一些图像。 + +```python +>>> from optimum.neuron import Neu +ronStableDiffusionXLPipeline + +>>> stable_diffusion_xl = NeuronStableDiffusionXLPipeline.from_pretrained("sd_neuron_xl/") +>>> prompt = "a pig with wings flying in floating US dollar banknotes in the air, skyscrapers behind, warm color palette, muted colors, detailed, 8k" +>>> image = stable_diffusion_xl(prompt).images[0] +``` + +peggy generated by sdxl on inf2 + +欢迎查看Optimum Neuron [文档](https://huggingface.co/docs/optimum-neuron/en/inference_tutorials/stable_diffusion#generate-images-with-stable-diffusion-models-on-aws-inferentia)中更多不同用例的指南和示例! 
\ No newline at end of file diff --git a/docs/source/zh/optimization/open_vino.md b/docs/source/zh/optimization/open_vino.md new file mode 100644 index 0000000000..8229c5a944 --- /dev/null +++ b/docs/source/zh/optimization/open_vino.md @@ -0,0 +1,77 @@ + + +# OpenVINO + +🤗 [Optimum](https://github.com/huggingface/optimum-intel) 提供与 OpenVINO 兼容的 Stable Diffusion 管道,可在各种 Intel 处理器上执行推理(请参阅支持的设备[完整列表](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html))。 + +您需要安装 🤗 Optimum Intel,并使用 `--upgrade-strategy eager` 选项以确保 [`optimum-intel`](https://github.com/huggingface/optimum-intel) 使用最新版本: + +```bash +pip install --upgrade-strategy eager optimum["openvino"] +``` + +本指南将展示如何使用 Stable Diffusion 和 Stable Diffusion XL (SDXL) 管道与 OpenVINO。 + +## Stable Diffusion + +要加载并运行推理,请使用 [`~optimum.intel.OVStableDiffusionPipeline`]。如果您想加载 PyTorch 模型并即时转换为 OpenVINO 格式,请设置 `export=True`: + +```python +from optimum.intel import OVStableDiffusionPipeline + +model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" +pipeline = OVStableDiffusionPipeline.from_pretrained(model_id, export=True) +prompt = "sailing ship in storm by Rembrandt" +image = pipeline(prompt).images[0] + +# 别忘了保存导出的模型 +pipeline.save_pretrained("openvino-sd-v1-5") +``` + +为了进一步加速推理,静态重塑模型。如果您更改任何参数,例如输出高度或宽度,您需要再次静态重塑模型。 + +```python +# 定义与输入和期望输出相关的形状 +batch_size, num_images, height, width = 1, 1, 512, 512 + +# 静态重塑模型 +pipeline.reshape(batch_size, height, width, num_images) +# 在推理前编译模型 +pipeline.compile() + +image = pipeline( + prompt, + height=height, + width=width, + num_images_per_prompt=num_images, +).images[0] +``` +
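+
+如前所述,一旦更改了输出高度、宽度或批处理大小等参数,就需要重新进行静态重塑并重新编译。下面是一个简单示意(沿用上面的 `pipeline` 和 `prompt` 变量,分辨率数值仅作示例):
+
+```python
+# 切换到新的输出分辨率:重新静态重塑并重新编译模型
+batch_size, num_images, height, width = 1, 1, 768, 768
+
+pipeline.reshape(batch_size, height, width, num_images)
+pipeline.compile()
+
+image = pipeline(
+    prompt,
+    height=height,
+    width=width,
+    num_images_per_prompt=num_images,
+).images[0]
+```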
+ +
+ +您可以在 🤗 Optimum [文档](https://huggingface.co/docs/optimum/intel/inference#stable-diffusion) 中找到更多示例,Stable Diffusion 支持文本到图像、图像到图像和修复。 + +## Stable Diffusion XL + +要加载并运行 SDXL 推理,请使用 [`~optimum.intel.OVStableDiffusionXLPipeline`]: + +```python +from optimum.intel import OVStableDiffusionXLPipeline + +model_id = "stabilityai/stable-diffusion-xl-base-1.0" +pipeline = OVStableDiffusionXLPipeline.from_pretrained(model_id) +prompt = "sailing ship in storm by Rembrandt" +image = pipeline(prompt).images[0] +``` + +为了进一步加速推理,可以如Stable Diffusion部分所示[静态重塑](#stable-diffusion)模型。 + +您可以在🤗 Optimum[文档](https://huggingface.co/docs/optimum/intel/inference#stable-diffusion-xl)中找到更多示例,并且在OpenVINO中运行SDXL支持文本到图像和图像到图像。 \ No newline at end of file diff --git a/docs/source/zh/optimization/para_attn.md b/docs/source/zh/optimization/para_attn.md new file mode 100644 index 0000000000..106a8818c6 --- /dev/null +++ b/docs/source/zh/optimization/para_attn.md @@ -0,0 +1,497 @@ +# ParaAttention + +
+ +
+
+ +
+ +大型图像和视频生成模型,如 [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) 和 [HunyuanVideo](https://huggingface.co/tencent/HunyuanVideo),由于其规模,可能对实时应用和部署构成推理挑战。 + +[ParaAttention](https://github.com/chengzeyi/ParaAttention) 是一个实现了**上下文并行**和**第一块缓存**的库,可以与其他技术(如 torch.compile、fp8 动态量化)结合使用,以加速推理。 + +本指南将展示如何在 NVIDIA L20 GPU 上对 FLUX.1-dev 和 HunyuanVideo 应用 ParaAttention。 +在我们的基线基准测试中,除了 HunyuanVideo 为避免内存不足错误外,未应用任何优化。 + +我们的基线基准测试显示,FLUX.1-dev 能够在 28 步中生成 1024x1024 分辨率图像,耗时 26.36 秒;HunyuanVideo 能够在 30 步中生成 129 帧 720p 分辨率视频,耗时 3675.71 秒。 + +> [!TIP] +> 对于更快的上下文并行推理,请尝试使用支持 NVLink 的 NVIDIA A100 或 H100 GPU(如果可用),尤其是在 GPU 数量较多时。 + +## 第一块缓存 + +缓存模型中 transformer 块的输出并在后续推理步骤中重用它们,可以降低计算成本并加速推理。 + +然而,很难决定何时重用缓存以确保生成图像或视频的质量。ParaAttention 直接使用**第一个 transformer 块输出的残差差异**来近似模型输出之间的差异。当差异足够小时,重用先前推理步骤的残差差异。换句话说,跳过去噪步骤。 + +这在 FLUX.1-dev 和 HunyuanVideo 推理上实现了 2 倍加速,且质量非常好。 + +
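+
+下面用一段简化的 Python 伪代码示意这一判定逻辑(仅为概念性草图,并非 ParaAttention 的实际实现,其中的函数名与变量名均为示意):
+
+```python
+# 概念性草图:第一块缓存(First Block Cache)的判定逻辑
+def should_reuse_cache(first_block_residual, prev_first_block_residual, threshold):
+    # 用第一个 transformer 块输出的残差差异,近似整个模型输出之间的差异
+    diff = (first_block_residual - prev_first_block_residual).abs().mean()
+    rel_diff = diff / prev_first_block_residual.abs().mean()
+    # 差异小于阈值(对应 residual_diff_threshold)时,重用上一步缓存的残差,跳过其余块
+    return rel_diff < threshold
+```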
+图:Cache in Diffusion Transformer(AdaCache 的工作原理,第一块缓存是其变体)
+
+ + + + +要在 FLUX.1-dev 上应用第一块缓存,请调用 `apply_cache_on_pipe`,如下所示。0.08 是 FLUX 模型的默认残差差异值。 + +```python +import time +import torch +from diffusers import FluxPipeline + +pipe = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + torch_dtype=torch.bfloat16, +).to("cuda") + +from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe + +apply_cache_on_pipe(pipe, residual_diff_thre +shold=0.08) + +# 启用内存节省 +# pipe.enable_model_cpu_offload() +# pipe.enable_sequential_cpu_offload() + +begin = time.time() +image = pipe( + "A cat holding a sign that says hello world", + num_inference_steps=28, +).images[0] +end = time.time() +print(f"Time: {end - begin:.2f}s") + +print("Saving image to flux.png") +image.save("flux.png") +``` + +| 优化 | 原始 | FBCache rdt=0.06 | FBCache rdt=0.08 | FBCache rdt=0.10 | FBCache rdt=0.12 | +| - | - | - | - | - | - | +| 预览 | ![Original](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-original.png) | ![FBCache rdt=0.06](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.06.png) | ![FBCache rdt=0.08](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.08.png) | ![FBCache rdt=0.10](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.10.png) | ![FBCache rdt=0.12](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.12.png) | +| 墙时间 (s) | 26.36 | 21.83 | 17.01 | 16.00 | 13.78 | + +First Block Cache 将推理速度降低到 17.01 秒,与基线相比,或快 1.55 倍,同时保持几乎零质量损失。 + + + + +要在 HunyuanVideo 上应用 First Block Cache,请使用 `apply_cache_on_pipe`,如下所示。0.06 是 HunyuanVideo 模型的默认残差差值。 + +```python +import time +import torch +from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel +from diffusers.utils import export_to_video + +model_id = "tencent/HunyuanVideo" +transformer = HunyuanVideoTransformer3DModel.from_pretrained( + model_id, + subfolder="transformer", + torch_dtype=torch.bfloat16, + revision="refs/pr/18", +) +pipe = HunyuanVideoPipeline.from_pretrained( + model_id, + transformer=transformer, + torch_dtype=torch.float16, + revision="refs/pr/18", +).to("cuda") + +from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe + +apply_cache_on_pipe(pipe, residual_diff_threshold=0.6) + +pipe.vae.enable_tiling() + +begin = time.time() +output = pipe( + prompt="A cat walks on the grass, realistic", + height=720, + width=1280, + num_frames=129, + num_inference_steps=30, +).frames[0] +end = time.time() +print(f"Time: {end - begin:.2f}s") + +print("Saving video to hunyuan_video.mp4") +export_to_video(output, "hunyuan_video.mp4", fps=15) +``` + + + + HunyuanVideo 无 FBCache + + + + HunyuanVideo 与 FBCache + +First Block Cache 将推理速度降低至 2271.06 秒,相比基线快了 1.62 倍,同时保持了几乎为零的质量损失。 + + + + +## fp8 量化 + +fp8 动态量化进一步加速推理并减少内存使用。为了使用 8 位 [NVIDIA Tensor Cores](https://www.nvidia.com/en-us/data-center/tensor-cores/),必须对激活和权重进行量化。 + +使用 `float8_weight_only` 和 `float8_dynamic_activation_float8_weight` 来量化文本编码器和变换器模型。 + +默认量化方法是逐张量量化,但如果您的 GPU 支持逐行量化,您也可以尝试它以获得更好的准确性。 + +使用以下命令安装 [torchao](https://github.com/pytorch/ao/tree/main)。 + +```bash +pip3 install -U torch torchao +``` + +[torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) 使用 `mode="max-autotune-no-cudagraphs"` 或 `mode="max-autotune"` 选择最佳内核以获得性能。如果是第一次调用模型,编译可能会花费很长时间,但一旦模型编译完成,这是值得的。 + 
+此示例仅量化变换器模型,但您也可以量化文本编码器以进一步减少内存使用。 + +> [!TIP] +> 动态量化可能会显著改变模型输出的分布,因此您需要将 `residual_diff_threshold` 设置为更大的值以使其生效。 + + + + +```python +import time +import torch +from diffusers import FluxPipeline + +pipe = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + torch_dtype=torch.bfloat16, +).to("cuda") + +from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe + +apply_cache_on_pipe( + pipe, + residual_diff_threshold=0.12, # 使用更大的值以使缓存生效 +) + +from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only + +quantize_(pipe.text_encoder, float8_weight_only()) +quantize_(pipe.transformer, float8_dynamic_activation_float8_weight()) +pipe.transformer = torch.compile( + pipe.transformer, mode="max-autotune-no-cudagraphs", +) + +# 启用内存节省 +# pipe.enable_model_cpu_offload() +# pipe.enable_sequential_cpu_offload() + +for i in range(2): + begin = time.time() + image = pipe( + "A cat holding a sign that says hello world", + num_inference_steps=28, + ).images[0] + end = time.time() + if i == 0: + print(f"预热时间: {end - begin:.2f}s") + else: + print(f"时间: {end - begin:.2f}s") + +print("保存图像到 flux.png") +image.save("flux.png") +``` + +fp8 动态量化和 torch.compile 将推理速度降低至 7.56 秒,相比基线快了 3.48 倍。 + + + +```python +import time +import torch +from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel +from diffusers.utils import export_to_video + +model_id = "tencent/HunyuanVideo" +transformer = HunyuanVideoTransformer3DModel.from_pretrained( + model_id, + subfolder="transformer", + torch_dtype=torch.bfloat16, + revision="refs/pr/18", +) +pipe = HunyuanVideoPipeline.from_pretrained( + model_id, + transformer=transformer, + torch_dtype=torch.float16, + revision="refs/pr/18", +).to("cuda") + +from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe + +apply_cache_on_pipe(pipe) + +from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only + +quantize_(pipe.text_encoder, float8_weight_only()) +quantize_(pipe.transformer, float8_dynamic_activation_float8_weight()) +pipe.transformer = torch.compile( + pipe.transformer, mode="max-autotune-no-cudagraphs", +) + +# Enable memory savings +pipe.vae.enable_tiling() +# pipe.enable_model_cpu_offload() +# pipe.enable_sequential_cpu_offload() + +for i in range(2): + begin = time.time() + output = pipe( + prompt="A cat walks on the grass, realistic", + height=720, + width=1280, + num_frames=129, + num_inference_steps=1 if i == 0 else 30, + ).frames[0] + end = time.time() + if i == 0: + print(f"Warm up time: {end - begin:.2f}s") + else: + print(f"Time: {end - begin:.2f}s") + +print("Saving video to hunyuan_video.mp4") +export_to_video(output, "hunyuan_video.mp4", fps=15) +``` + +NVIDIA L20 GPU 仅有 48GB 内存,在编译后且如果未调用 `enable_model_cpu_offload` 时,可能会遇到内存不足(OOM)错误,因为 HunyuanVideo 在高分辨率和大量帧数运行时具有非常大的激活张量。对于内存少于 80GB 的 GPU,可以尝试降低分辨率和帧数来避免 OOM 错误。 + +大型视频生成模型通常受注意力计算而非全连接层的瓶颈限制。这些模型不会从量化和 torch.compile 中显著受益。 + + + + +## 上下文并行性 + +上下文并行性并行化推理并随多个 GPU 扩展。ParaAttention 组合设计允许您将上下文并行性与第一块缓存和动态量化结合使用。 + +> [!TIP] +> 请参考 [ParaAttention](https://github.com/chengzeyi/ParaAttention/tree/main) 仓库获取详细说明和如何使用多个 GPU 扩展推理的示例。 + +如果推理过程需要持久化和可服务,建议使用 [torch.multiprocessing](https://pytorch.org/docs/stable/multiprocessing.html) 编写您自己的推理处理器。这可以消除启动进程以及加载和重新编译模型的开销。 + + + + +以下代码示例结合了第一块缓存、fp8动态量化、torch.compile和上下文并行,以实现最快的推理速度。 + +```python +import time +import torch +import torch.distributed as dist +from diffusers import FluxPipeline + 
+dist.init_process_group() + +torch.cuda.set_device(dist.get_rank()) + +pipe = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + torch_dtype=torch.bfloat16, +).to("cuda") + +from para_attn.context_parallel import init_context_parallel_mesh +from para_attn.context_parallel.diffusers_adapters import parallelize_pipe +from para_attn.parallel_vae.diffusers_adapters import parallelize_vae + +mesh = init_context_parallel_mesh( + pipe.device.type, + max_ring_dim_size=2, +) +parallelize_pipe( + pipe, + mesh=mesh, +) +parallelize_vae(pipe.vae, mesh=mesh._flatten()) + +from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe + +apply_cache_on_pipe( + pipe, + residual_diff_threshold=0.12, # 使用较大的值以使缓存生效 +) + +from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only + +quantize_(pipe.text_encoder, float8_weight_only()) +quantize_(pipe.transformer, float8_dynamic_activation_float8_weight()) +torch._inductor.config.reorder_for_compute_comm_overlap = True +pipe.transformer = torch.compile( + pipe.transformer, mode="max-autotune-no-cudagraphs", +) + +# 启用内存节省 +# pipe.enable_model_cpu_offload(gpu_id=dist.get_rank()) +# pipe.enable_sequential_cpu_offload(gpu_id=dist.get_rank()) + +for i in range(2): + begin = time.time() + image = pipe( + "A cat holding a sign that says hello world", + num_inference_steps=28, + output_type="pil" if dist.get_rank() == 0 else "pt", + ).images[0] + end = time.time() + if dist.get_rank() == 0: + if i == 0: + print(f"预热时间: {end - begin:.2f}s") + else: + print(f"时间: {end - begin:.2f}s") + +if dist.get_rank() == 0: + print("将图像保存到flux.png") + image.save("flux.png") + +dist.destroy_process_group() +``` + +保存到`run_flux.py`并使用[torchrun](https://pytorch.org/docs/stable/elastic/run.html)启动。 + +```bash +# 使用--nproc_per_node指定GPU数量 +torchrun --nproc_per_node=2 run_flux.py +``` + +推理速度降至8.20秒,相比基线快了3.21倍,使用2个NVIDIA L20 GPU。在4个L20上,推理速度为3.90秒,快了6.75倍。 + + + + +以下代码示例结合了第一块缓存和上下文并行,以实现最快的推理速度。 + +```python +import time +import torch +import torch.distributed as dist +from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel +from diffusers.utils import export_to_video + +dist.init_process_group() + +torch.cuda.set_device(dist.get_rank()) + +model_id = "tencent/HunyuanVideo" +transformer = HunyuanVideoTransformer3DModel.from_pretrained( + model_id, + subfolder="transformer", + torch_dtype=torch.bfloat16, + revision="refs/pr/18", +) +pipe = HunyuanVideoPipeline.from_pretrained( + model_id, + transformer=transformer, + torch_dtype=torch.float16, + revision="refs/pr/18", +).to("cuda") + +from para_attn.context_parallel import init_context_parallel_mesh +from para_attn.context_parallel.diffusers_adapters import parallelize_pipe +from para_attn.parallel_vae.diffusers_adapters import parallelize_vae + +mesh = init_context_parallel_mesh( + pipe.device.type, +) +parallelize_pipe( + pipe, + mesh=mesh, +) +parallelize_vae(pipe.vae, mesh=mesh._flatten()) + +from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe + +apply_cache_on_pipe(pipe) + +# from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only +# +# torch._inductor.config.reorder_for_compute_comm_overlap = True +# +# quantize_(pipe.text_encoder, float8_weight_only()) +# quantize_(pipe.transformer, float8_dynamic_activation_float8_weight()) +# pipe.transformer = torch.compile( +# pipe.transformer, mode="max-autotune-no-cudagraphs", +# ) + +# 启用内存节省 +pipe.vae.enable_tiling() +# 
pipe.enable_model_cpu_offload(gpu_id=dist.get_rank()) +# pipe.enable_sequential_cpu_offload(gpu_id=dist.get_rank()) + +for i in range(2): + begin = time.time() + output = pipe( + prompt="A cat walks on the grass, realistic", + height=720, + width=1280, + num_frames=129, + num_inference_steps=1 if i == 0 else 30, + output_type="pil" if dist.get_rank() == 0 else "pt", + ).frames[0] + end = time.time() + if dist.get_rank() == 0: + if i == 0: + print(f"预热时间: {end - begin:.2f}s") + else: + print(f"时间: {end - begin:.2f}s") + +if dist.get_rank() == 0: + print("保存视频到 hunyuan_video.mp4") + export_to_video(output, "hunyuan_video.mp4", fps=15) + +dist.destroy_process_group() +``` + +保存到 `run_hunyuan_video.py` 并使用 [torchrun](https://pytorch.org/docs/stable/elastic/run.html) 启动。 + +```bash +# 使用 --nproc_per_node 指定 GPU 数量 +torchrun --nproc_per_node=8 run_hunyuan_video.py +``` + +推理速度降低到 649.23 秒,相比基线快 5.66 倍,使用 8 个 NVIDIA L20 GPU。 + + + + +## 基准测试 + + + + +| GPU 类型 | GPU 数量 | 优化 | 墙钟时间 (s) | 加速比 | +| - | - | - | - | - | +| NVIDIA L20 | 1 | 基线 | 26.36 | 1.00x | +| NVIDIA L20 | 1 | FBCache (rdt=0.08) | 17.01 | 1.55x | +| NVIDIA L20 | 1 | FP8 DQ | 13.40 | 1.96x | +| NVIDIA L20 | 1 | FBCache (rdt=0.12) + FP8 DQ | 7.56 | 3.48x | +| NVIDIA L20 | 2 | FBCache (rdt=0.12) + FP8 DQ + CP | 4.92 | 5.35x | +| NVIDIA L20 | 4 | FBCache (rdt=0.12) + FP8 DQ + CP | 3.90 | 6.75x | + + + + +| GPU 类型 | GPU 数量 | 优化 | 墙钟时间 (s) | 加速比 | +| - | - | - | - | - | +| NVIDIA L20 | 1 | 基线 | 3675.71 | 1.00x | +| NVIDIA +L20 | 1 | FBCache | 2271.06 | 1.62x | +| NVIDIA L20 | 2 | FBCache + CP | 1132.90 | 3.24x | +| NVIDIA L20 | 4 | FBCache + CP | 718.15 | 5.12x | +| NVIDIA L20 | 8 | FBCache + CP | 649.23 | 5.66x | + + + \ No newline at end of file diff --git a/docs/source/zh/optimization/pruna.md b/docs/source/zh/optimization/pruna.md new file mode 100644 index 0000000000..31cc3d52fa --- /dev/null +++ b/docs/source/zh/optimization/pruna.md @@ -0,0 +1,184 @@ +# Pruna + +[Pruna](https://github.com/PrunaAI/pruna) 是一个模型优化框架,提供多种优化方法——量化、剪枝、缓存、编译——以加速推理并减少内存使用。以下是优化方法的概览。 + +| 技术 | 描述 | 速度 | 内存 | 质量 | +|------------|---------------------------------------------------------------------------------------|:----:|:----:|:----:| +| `batcher` | 将多个输入分组在一起同时处理,提高计算效率并减少处理时间。 | ✅ | ❌ | ➖ | +| `cacher` | 存储计算的中间结果以加速后续操作。 | ✅ | ➖ | ➖ | +| `compiler` | 为特定硬件优化模型指令。 | ✅ | ➖ | ➖ | +| `distiller`| 训练一个更小、更简单的模型来模仿一个更大、更复杂的模型。 | ✅ | ✅ | ❌ | +| `quantizer`| 降低权重和激活的精度,减少内存需求。 | ✅ | ✅ | ❌ | +| `pruner` | 移除不重要或冗余的连接和神经元,产生一个更稀疏、更高效的网络。 | ✅ | ✅ | ❌ | +| `recoverer`| 在压缩后恢复模型的性能。 | ➖ | ➖ | ✅ | +| `factorizer`| 将多个小矩阵乘法批处理为一个大型融合操作。 | ✅ | ➖ | ➖ | +| `enhancer` | 通过应用后处理算法(如去噪或上采样)来增强模型输出。 | ❌ | - | ✅ | + +✅ (改进), ➖ (大致相同), ❌ (恶化) + +在 [Pruna 文档](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/configure.html#configure-algorithms) 中探索所有优化方法。 + +## 安装 + +使用以下命令安装 Pruna。 + +```bash +pip install pruna +``` + +## 优化 Diffusers 模型 + +Diffusers 模型支持广泛的优化算法,如下所示。 + +
+图:Diffusers 模型支持的优化算法概览
+
+下面的示例使用 factorizer、compiler 和 cacher 算法的组合优化 [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)。这种组合将推理速度加速高达 4.2 倍,并将峰值 GPU 内存使用从 34.7GB 减少到 28.0GB,同时几乎保持相同的输出质量。
+
+> [!TIP]
+> 参考 [Pruna 优化](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/configure.html) 文档,以了解更多关于本示例中使用的优化技术的信息。
+
+图:用于 FLUX.1-dev 的优化技术展示,结合了因子分解器、编译器和缓存器算法
+ +首先定义一个包含要使用的优化算法的`SmashConfig`。要优化模型,将管道和`SmashConfig`用`smash`包装,然后像往常一样使用管道进行推理。 + +```python +import torch +from diffusers import FluxPipeline + +from pruna import PrunaModel, SmashConfig, smash + +# 加载模型 +# 使用小GPU内存尝试segmind/Segmind-Vega或black-forest-labs/FLUX.1-schnell +pipe = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + torch_dtype=torch.bfloat16 +).to("cuda") + +# 定义配置 +smash_config = SmashConfig() +smash_config["factorizer"] = "qkv_diffusers" +smash_config["compiler"] = "torch_compile" +smash_config["torch_compile_target"] = "module_list" +smash_config["cacher"] = "fora" +smash_config["fora_interval"] = 2 + +# 为了获得最佳速度结果,可以添加这些配置 +# 但它们会将预热时间从1.5分钟增加到10分钟 +# smash_config["torch_compile_mode"] = "max-autotune-no-cudagraphs" +# smash_config["quantizer"] = "torchao" +# smash_config["torchao_quant_type"] = "fp8dq" +# smash_config["torchao_excluded_modules"] = "norm+embedding" + +# 优化模型 +smashed_pipe = smash(pipe, smash_config) + +# 运行模型 +smashed_pipe("a knitted purple prune").images[0] +``` + +
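+
+在分享模型之前,也可以先粗略测量一下优化后的推理延迟,确认加速效果。下面的计时方式仅为示意(第一次调用会触发编译,因此先预热一次再计时):
+
+```python
+import time
+
+# 预热一次以触发编译
+smashed_pipe("a knitted purple prune").images[0]
+
+start = time.time()
+smashed_pipe("a knitted purple prune").images[0]
+print(f"优化后单次推理耗时: {time.time() - start:.2f}s")
+```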
+ +
+ +优化后,我们可以使用Hugging Face Hub共享和加载优化后的模型。 + +```python +# 保存模型 +smashed_pipe.save_to_hub("/FLUX.1-dev-smashed") + +# 加载模型 +smashed_pipe = PrunaModel.from_hub("/FLUX.1-dev-smashed") +``` + +## 评估和基准测试Diffusers模型 + +Pruna提供了[EvaluationAgent](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/evaluate.html)来评估优化后模型的质量。 + +我们可以定义我们关心的指标,如总时间和吞吐量,以及要评估的数据集。我们可以定义一个模型并将其传递给`EvaluationAgent`。 + + + + +我们可以通过使用`EvaluationAgent`加载和评估优化后的模型,并将其传递给`Task`。 + +```python +import torch +from diffusers import FluxPipeline + +from pruna import PrunaModel +from pruna.data.pruna_datamodule import PrunaDataModule +from pruna.evaluation.evaluation_agent import EvaluationAgent +from pruna.evaluation.metrics import ( + ThroughputMetric, + TorchMetricWrapper, + TotalTimeMetric, +) +from pruna.evaluation.task import Task + +# define the device +device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu" + +# 加载模型 +# 使用小GPU内存尝试 PrunaAI/Segmind-Vega-smashed 或 PrunaAI/FLUX.1-dev-smashed +smashed_pipe = PrunaModel.from_hub("PrunaAI/FLUX.1-dev-smashed") + +# 定义指标 +metrics = [ + TotalTimeMetric(n_iterations=20, n_warmup_iterations=5), + ThroughputMetric(n_iterations=20, n_warmup_iterations=5), + TorchMetricWrapper("clip"), +] + +# 定义数据模块 +datamodule = PrunaDataModule.from_string("LAION256") +datamodule.limit_datasets(10) + +# 定义任务和评估代理 +task = Task(metrics, datamodule=datamodule, device=device) +eval_agent = EvaluationAgent(task) + +# 评估优化模型并卸载到CPU +smashed_pipe.move_to_device(device) +smashed_pipe_results = eval_agent.evaluate(smashed_pipe) +smashed_pipe.move_to_device("cpu") +``` + + + + +除了比较优化模型与基础模型,您还可以评估独立的 `diffusers` 模型。这在您想评估模型性能而不考虑优化时非常有用。我们可以通过使用 `PrunaModel` 包装器并运行 `EvaluationAgent` 来实现。 + +```python +import torch +from diffusers import FluxPipeline + +from pruna import PrunaModel + +# 加载模型 +# 使用小GPU内存尝试 PrunaAI/Segmind-Vega-smashed 或 PrunaAI/FLUX.1-dev-smashed +pipe = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + torch_dtype=torch.bfloat16 +).to("cpu") +wrapped_pipe = PrunaModel(model=pipe) +``` + + + + +现在您已经了解了如何优化和评估您的模型,可以开始使用 Pruna 来优化您自己的模型了。幸运的是,我们有许多示例来帮助您入门。 + +> [!TIP] +> 有关基准测试 Flux 的更多详细信息,请查看 [宣布 FLUX-Juiced:最快的图像生成端点(快 2.6 倍)!](https://huggingface.co/blog/PrunaAI/flux-fastest-image-generation-endpoint) 博客文章和 [InferBench](https://huggingface.co/spaces/PrunaAI/InferBench) 空间。 + +## 参考 + +- [Pruna](https://github.com/pruna-ai/pruna) +- [Pruna 优化](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/configure.html#configure-algorithms) +- [Pruna 评估](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/evaluate.html) +- [Pruna 教程](https://docs.pruna.ai/en/stable/docs_pruna/tutorials/index.html) \ No newline at end of file diff --git a/docs/source/zh/optimization/speed-memory-optims.md b/docs/source/zh/optimization/speed-memory-optims.md new file mode 100644 index 0000000000..48f1483d3e --- /dev/null +++ b/docs/source/zh/optimization/speed-memory-optims.md @@ -0,0 +1,200 @@ + + +# 编译和卸载量化模型 + +优化模型通常涉及[推理速度](./fp16)和[内存使用](./memory)之间的权衡。例如,虽然[缓存](./cache)可以提高推理速度,但它也会增加内存消耗,因为它需要存储中间注意力层的输出。一种更平衡的优化策略结合了量化模型、[torch.compile](./fp16#torchcompile) 和各种[卸载方法](./memory#offloading)。 + +> [!TIP] +> 查看 [torch.compile](./fp16#torchcompile) 指南以了解更多关于编译以及如何在此处应用的信息。例如,区域编译可以显著减少编译时间,而不会放弃任何加速。 + +对于图像生成,结合量化和[模型卸载](./memory#model-offloading)通常可以在质量、速度和内存之间提供最佳权衡。组卸载对于图像生成效果不佳,因为如果计算内核更快完成,通常不可能*完全*重叠数据传输。这会导致 CPU 和 GPU 之间的一些通信开销。 + +对于视频生成,结合量化和[组卸载](./memory#group-offloading)往往更好,因为视频模型更受计算限制。 + +下表提供了优化策略组合及其对 Flux 延迟和内存使用的影响的比较。 + +| 
组合 | 延迟 (s) | 内存使用 (GB) | +|---|---|---| +| 量化 | 32.602 | 14.9453 | +| 量化, torch.compile | 25.847 | 14.9448 | +| 量化, torch.compile, 模型 CPU 卸载 | 32.312 | 12.2369 | +这些结果是在 Flux 上使用 RTX 4090 进行基准测试的。transformer 和 text_encoder 组件已量化。如果您有兴趣评估自己的模型,请参考[基准测试脚本](https://gist.github.com/sayakpaul/0db9d8eeeb3d2a0e5ed7cf0d9ca19b7d)。 + +本指南将向您展示如何使用 [bitsandbytes](../quantization/bitsandbytes#torchcompile) 编译和卸载量化模型。确保您正在使用 [PyTorch nightly](https://pytorch.org/get-started/locally/) 和最新版本的 bitsandbytes。 + +```bash +pip install -U bitsandbytes +``` + +## 量化和 torch.compile + +首先通过[量化](../quantization/overview)模型来减少存储所需的内存,并[编译](./fp16#torchcompile)它以加速推理。 + +配置 [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) `capture_dynamic_output_shape_ops = True` 以在编译 bitsandbytes 模型时处理动态输出。 + +```py +import torch +from diffusers import DiffusionPipeline +from diffusers.quantizers import PipelineQuantizationConfig + +torch._dynamo.config.capture_dynamic_output_shape_ops = True + +# 量化 +pipeline_quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, + components_to_quantize=["transformer", "text_encoder_2"], +) +pipeline = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + quantization_config=pipeline_quant_config, + torch_dtype=torch.bfloat16, +).to("cuda") + +# 编译 +pipeline.transformer.to(memory_format=torch.channels_last) +pipeline.transformer.compile(mode="max-autotune", fullgraph=True) +pipeline(""" + cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California + highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain +""" +).images[0] +``` + +## 量化、torch.compile 和卸载 + +除了量化和 torch.compile,如果您需要进一步减少内存使用,可以尝试卸载。卸载根据需要将各种层或模型组件从 CPU 移动到 GPU 进行计算。 + +在卸载期间配置 [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) `cache_size_limit` 以避免过多的重新编译,并设置 `capture_dynamic_output_shape_ops = True` 以在编译 bitsandbytes 模型时处理动态输出。 + + + + +[模型 CPU 卸载](./memory#model-offloading) 将单个管道组件(如 transformer 模型)在需要计算时移动到 GPU。否则,它会被卸载到 CPU。 + +```py +import torch +from diffusers import DiffusionPipeline +from diffusers.quantizers import PipelineQuantizationConfig + +torch._dynamo.config.cache_size_limit = 1000 +torch._dynamo.config.capture_dynamic_output_shape_ops = True + +# 量化 +pipeline_quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, + components_to_quantize=["transformer", "text_encoder_2"], +) +pipeline = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + quantization_config=pipeline_quant_config, + torch_dtype=torch.bfloat16, +).to("cuda") + +# 模型 CPU 卸载 +pipeline.enable_model_cpu_offload() + +# 编译 +pipeline.transformer.compile() +pipeline( + "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain" +).images[0] +``` + + + + +[组卸载](./memory#group-offloading) 将单个管道组件(如变换器模型)的内部层移动到 GPU 进行计算,并在不需要时将其卸载。同时,它使用 [CUDA 流](./memory#cuda-stream) 功能来预取下一层以执行。 + +通过重叠计算和数据传输,它比模型 CPU 卸载更快,同时还能节省内存。 + +```py +# pip install ftfy +import torch +from diffusers import AutoModel, DiffusionPipeline +from diffusers.hooks import apply_group_offloading +from diffusers.utils import 
export_to_video +from diffusers.quantizers import PipelineQuantizationConfig +from transformers import UMT5EncoderModel + +torch._dynamo.config.cache_size_limit = 1000 +torch._dynamo.config.capture_dynamic_output_shape_ops = True + +# 量化 +pipeline_quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, + components_to_quantize=["transformer", "text_encoder"], +) + +text_encoder = UMT5EncoderModel.from_pretrained( + "Wan-AI/Wan2.1-T2V-14B-Diffusers", subfolder="text_encoder", torch_dtype=torch.bfloat16 +) +pipeline = DiffusionPipeline.from_pretrained( + "Wan-AI/Wan2.1-T2V-14B-Diffusers", + quantization_config=pipeline_quant_config, + torch_dtype=torch.bfloat16, +).to("cuda") + +# 组卸载 +onload_device = torch.device("cuda") +offload_device = torch.device("cpu") + +pipeline.transformer.enable_group_offload( + onload_device=onload_device, + offload_device=offload_device, + offload_type="leaf_level", + use_stream=True, + non_blocking=True +) +pipeline.vae.enable_group_offload( + onload_device=onload_device, + offload_device=offload_device, + offload_type="leaf_level", + use_stream=True, + non_blocking=True +) +apply_group_offloading( + pipeline.text_encoder, + onload_device=onload_device, + offload_type="leaf_level", + use_stream=True, + non_blocking=True +) + +# 编译 +pipeline.transformer.compile() + +prompt = """ +The camera rushes from far to near in a low-angle shot, +revealing a white ferret on a log. It plays, leaps into the water, and emerges, as the camera zooms in +for a close-up. Water splashes berry bushes nearby, while moss, snow, and leaves blanket the ground. +Birch trees and a light blue sky frame the scene, with ferns in the foreground. Side lighting casts dynamic +shadows and warm highlights. Medium composition, front view, low angle, with depth of field. 
+""" +negative_prompt = """ +Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, +low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, +misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards +""" + +output = pipeline( + prompt=prompt, + negative_prompt=negative_prompt, + num_frames=81, + guidance_scale=5.0, +).frames[0] +export_to_video(output, "output.mp4", fps=16) +``` + + + \ No newline at end of file diff --git a/docs/source/zh/optimization/tgate.md b/docs/source/zh/optimization/tgate.md new file mode 100644 index 0000000000..f15b9bde84 --- /dev/null +++ b/docs/source/zh/optimization/tgate.md @@ -0,0 +1,182 @@ +# T-GATE + +[T-GATE](https://github.com/HaozheLiu-ST/T-GATE/tree/main) 通过跳过交叉注意力计算一旦收敛,加速了 [Stable Diffusion](../api/pipelines/stable_diffusion/overview)、[PixArt](../api/pipelines/pixart) 和 [Latency Consistency Model](../api/pipelines/latent_consistency_models.md) 管道的推理。此方法不需要任何额外训练,可以将推理速度提高 10-50%。T-GATE 还与 [DeepCache](./deepcache) 等其他优化方法兼容。 + +开始之前,请确保安装 T-GATE。 + +```bash +pip install tgate +pip install -U torch diffusers transformers accelerate DeepCache +``` + +要使用 T-GATE 与管道,您需要使用其对应的加载器。 + +| 管道 | T-GATE 加载器 | +|---|---| +| PixArt | TgatePixArtLoader | +| Stable Diffusion XL | TgateSDXLLoader | +| Stable Diffusion XL + DeepCache | TgateSDXLDeepCacheLoader | +| Stable Diffusion | TgateSDLoader | +| Stable Diffusion + DeepCache | TgateSDDeepCacheLoader | + +接下来,创建一个 `TgateLoader`,包含管道、门限步骤(停止计算交叉注意力的时间步)和推理步骤数。然后在管道上调用 `tgate` 方法,提供提示、门限步骤和推理步骤数。 + +让我们看看如何为几个不同的管道启用此功能。 + + + + +使用 T-GATE 加速 `PixArtAlphaPipeline`: + +```py +import torch +from diffusers import PixArtAlphaPipeline +from tgate import TgatePixArtLoader + +pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16) + +gate_step = 8 +inference_step = 25 +pipe = TgatePixArtLoader( + pipe, + gate_step=gate_step, + num_inference_steps=inference_step, +).to("cuda") + +image = pipe.tgate( + "An alpaca made of colorful building blocks, cyberpunk.", + gate_step=gate_step, + num_inference_steps=inference_step, +).images[0] +``` + + + +使用 T-GATE 加速 `StableDiffusionXLPipeline`: + +```py +import torch +from diffusers import StableDiffusionXLPipeline +from diffusers import DPMSolverMultistepScheduler +from tgate import TgateSDXLLoader + +pipe = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + torch_dtype=torch.float16, + variant="fp16", + use_safetensors=True, +) +pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + +gate_step = 10 +inference_step = 25 +pipe = TgateSDXLLoader( + pipe, + gate_step=gate_step, + num_inference_steps=inference_step, +).to("cuda") + +image = pipe.tgate( + "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.", + gate_step=gate_step, + num_inference_steps=inference_step +).images[0] +``` + + + +使用 [DeepCache](https://github.co 加速 `StableDiffusionXLPipeline` +m/horseee/DeepCache) 和 T-GATE: + +```py +import torch +from diffusers import StableDiffusionXLPipeline +from diffusers import DPMSolverMultistepScheduler +from tgate import TgateSDXLDeepCacheLoader + +pipe = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + torch_dtype=torch.float16, + variant="fp16", + use_safetensors=True, +) 
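# 与上文 SDXL 示例一致,切换到 DPMSolverMultistepScheduler 采样调度器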
+pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + +gate_step = 10 +inference_step = 25 +pipe = TgateSDXLDeepCacheLoader( + pipe, + cache_interval=3, + cache_branch_id=0, +).to("cuda") + +image = pipe.tgate( + "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.", + gate_step=gate_step, + num_inference_steps=inference_step +).images[0] +``` + + + +使用 T-GATE 加速 `latent-consistency/lcm-sdxl`: + +```py +import torch +from diffusers import StableDiffusionXLPipeline +from diffusers import UNet2DConditionModel, LCMScheduler +from diffusers import DPMSolverMultistepScheduler +from tgate import TgateSDXLLoader + +unet = UNet2DConditionModel.from_pretrained( + "latent-consistency/lcm-sdxl", + torch_dtype=torch.float16, + variant="fp16", +) +pipe = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + unet=unet, + torch_dtype=torch.float16, + variant="fp16", +) +pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + +gate_step = 1 +inference_step = 4 +pipe = TgateSDXLLoader( + pipe, + gate_step=gate_step, + num_inference_steps=inference_step, + lcm=True +).to("cuda") + +image = pipe.tgate( + "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.", + gate_step=gate_step, + num_inference_steps=inference_step +).images[0] +``` + + + +T-GATE 还支持 [`StableDiffusionPipeline`] 和 [PixArt-alpha/PixArt-LCM-XL-2-1024-MS](https://hf.co/PixArt-alpha/PixArt-LCM-XL-2-1024-MS)。 + +## 基准测试 +| 模型 | MACs | 参数 | 延迟 | 零样本 10K-FID on MS-COCO | +|-----------------------|----------|-----------|---------|---------------------------| +| SD-1.5 | 16.938T | 859.520M | 7.032s | 23.927 | +| SD-1.5 w/ T-GATE | 9.875T | 815.557M | 4.313s | 20.789 | +| SD-2.1 | 38.041T | 865.785M | 16.121s | 22.609 | +| SD-2.1 w/ T-GATE | 22.208T | 815.433 M | 9.878s | 19.940 | +| SD-XL | 149.438T | 2.570B | 53.187s | 24.628 | +| SD-XL w/ T-GATE | 84.438T | 2.024B | 27.932s | 22.738 | +| Pixart-Alpha | 107.031T | 611.350M | 61.502s | 38.669 | +| Pixart-Alpha w/ T-GATE | 65.318T | 462.585M | 37.867s | 35.825 | +| DeepCache (SD-XL) | 57.888T | - | 19.931s | 23.755 | +| DeepCache 配合 T-GATE | 43.868T | - | 14.666秒 | 23.999 | +| LCM (SD-XL) | 11.955T | 2.570B | 3.805秒 | 25.044 | +| LCM 配合 T-GATE | 11.171T | 2.024B | 3.533秒 | 25.028 | +| LCM (Pixart-Alpha) | 8.563T | 611.350M | 4.733秒 | 36.086 | +| LCM 配合 T-GATE | 7.623T | 462.585M | 4.543秒 | 37.048 | + +延迟测试基于 NVIDIA 1080TI,MACs 和 Params 使用 [calflops](https://github.com/MrYxJ/calculate-flops.pytorch) 计算,FID 使用 [PytorchFID](https://github.com/mseitzer/pytorch-fid) 计算。 \ No newline at end of file diff --git a/docs/source/zh/optimization/tome.md b/docs/source/zh/optimization/tome.md new file mode 100644 index 0000000000..732777c558 --- /dev/null +++ b/docs/source/zh/optimization/tome.md @@ -0,0 +1,90 @@ + + +# 令牌合并 + +[令牌合并](https://huggingface.co/papers/2303.17604)(ToMe)在基于 Transformer 的网络的前向传递中逐步合并冗余令牌/补丁,这可以加速 [`StableDiffusionPipeline`] 的推理延迟。 + +从 `pip` 安装 ToMe: + +```bash +pip install tomesd +``` + +您可以使用 [`tomesd`](https://github.com/dbolya/tomesd) 库中的 [`apply_patch`](https://github.com/dbolya/tomesd?tab=readme-ov-file#usage) 函数: + +```diff + from diffusers import StableDiffusionPipeline + import torch + import tomesd + + pipeline = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, + ).to("cuda") ++ tomesd.apply_patch(pipeline, ratio=0.5) + + image = pipeline("a photo of an astronaut riding a horse 
on mars").images[0] +``` + +`apply_patch` 函数公开了多个[参数](https://github.com/dbolya/tomesd#usage),以帮助在管道推理速度和生成令牌的质量之间取得平衡。最重要的参数是 `ratio`,它控制在前向传递期间合并的令牌数量。 + +如[论文](https://huggingface.co/papers/2303.17604)中所述,ToMe 可以在显著提升推理速度的同时,很大程度上保留生成图像的质量。通过增加 `ratio`,您可以进一步加速推理,但代价是图像质量有所下降。 + +为了测试生成图像的质量,我们从 [Parti Prompts](https://parti.research.google/) 中采样了一些提示,并使用 [`StableDiffusionPipeline`] 进行了推理,设置如下: + +
+ +
+ +我们没有注意到生成样本的质量有任何显著下降,您可以在此 [WandB 报告](https://wandb.ai/sayakpaul/tomesd-results/runs/23j4bj3i?workspace=)中查看生成的样本。如果您有兴趣重现此实验,请使用此[脚本](https://gist.github.com/sayakpaul/8cac98d7f22399085a060992f411ecbd)。 + +## 基准测试 + +我们还在启用 [xFormers](https://huggingface.co/docs/diffusers/optimization/xformers) 的情况下,对 [`StableDiffusionPipeline`] 上 `tomesd` 的影响进行了基准测试,涵盖了多个图像分辨率。结果 +结果是从以下开发环境中的A100和V100 GPU获得的: + +```bash +- `diffusers` 版本:0.15.1 +- Python 版本:3.8.16 +- PyTorch 版本(GPU?):1.13.1+cu116 (True) +- Huggingface_hub 版本:0.13.2 +- Transformers 版本:4.27.2 +- Accelerate 版本:0.18.0 +- xFormers 版本:0.0.16 +- tomesd 版本:0.1.2 +``` + +要重现此基准测试,请随意使用此[脚本](https://gist.github.com/sayakpaul/27aec6bca7eb7b0e0aa4112205850335)。结果以秒为单位报告,并且在适用的情况下,我们报告了使用ToMe和ToMe + xFormers时相对于原始管道的加速百分比。 + +| **GPU** | **分辨率** | **批处理大小** | **原始** | **ToMe** | **ToMe + xFormers** | +|----------|----------------|----------------|-------------|----------------|---------------------| +| **A100** | 512 | 10 | 6.88 | 5.26 (+23.55%) | 4.69 (+31.83%) | +| | 768 | 10 | OOM | 14.71 | 11 | +| | | 8 | OOM | 11.56 | 8.84 | +| | | 4 | OOM | 5.98 | 4.66 | +| | | 2 | 4.99 | 3.24 (+35.07%) | 2.1 (+37.88%) | +| | | 1 | 3.29 | 2.24 (+31.91%) | 2.03 (+38.3%) | +| | 1024 | 10 | OOM | OOM | OOM | +| | | 8 | OOM | OOM | OOM | +| | | 4 | OOM | 12.51 | 9.09 | +| | | 2 | OOM | 6.52 | 4.96 | +| | | 1 | 6.4 | 3.61 (+43.59%) | 2.81 (+56.09%) | +| **V100** | 512 | 10 | OOM | 10.03 | 9.29 | +| | | 8 | OOM | 8.05 | 7.47 | +| | | 4 | 5.7 | 4.3 (+24.56%) | 3.98 (+30.18%) | +| | | 2 | 3.14 | 2.43 (+22.61%) | 2.27 (+27.71%) | +| | | 1 | 1.88 | 1.57 (+16.49%) | 1.57 (+16.49%) | +| | 768 | 10 | OOM | OOM | 23.67 | +| | | 8 | OOM | OOM | 18.81 | +| | | 4 | OOM | 11.81 | 9.7 | +| | | 2 | OOM | 6.27 | 5.2 | +| | | 1 | 5.43 | 3.38 (+37.75%) | 2.82 (+48.07%) | +| | 1024 | 10 | OOM | +如上表所示,`tomesd` 带来的加速效果在更大的图像分辨率下变得更加明显。有趣的是,使用 `tomesd` 可以在更高分辨率如 1024x1024 上运行管道。您可能还可以通过 [`torch.compile`](fp16#torchcompile) 进一步加速推理。 \ No newline at end of file diff --git a/docs/source/zh/optimization/xdit.md b/docs/source/zh/optimization/xdit.md new file mode 100644 index 0000000000..3308536d06 --- /dev/null +++ b/docs/source/zh/optimization/xdit.md @@ -0,0 +1,119 @@ +# xDiT + +[xDiT](https://github.com/xdit-project/xDiT) 是一个推理引擎,专为大规模并行部署扩散变换器(DiTs)而设计。xDiT 提供了一套用于扩散模型的高效并行方法,以及 GPU 内核加速。 + +xDiT 支持四种并行方法,包括[统一序列并行](https://huggingface.co/papers/2405.07719)、[PipeFusion](https://huggingface.co/papers/2405.14430)、CFG 并行和数据并行。xDiT 中的这四种并行方法可以以混合方式配置,优化通信模式以最适合底层网络硬件。 + +与并行化正交的优化侧重于加速单个 GPU 的性能。除了利用知名的注意力优化库外,我们还利用编译加速技术,如 torch.compile 和 onediff。 + +xDiT 的概述如下所示。 + +
+ +
+您可以使用以下命令安装 xDiT: + +```bash +pip install xfuser +``` + +以下是一个使用 xDiT 加速 Diffusers 模型推理的示例。 + +```diff + import torch + from diffusers import StableDiffusion3Pipeline + + from xfuser import xFuserArgs, xDiTParallel + from xfuser.config import FlexibleArgumentParser + from xfuser.core.distributed import get_world_group + + def main(): ++ parser = FlexibleArgumentParser(description="xFuser Arguments") ++ args = xFuserArgs.add_cli_args(parser).parse_args() ++ engine_args = xFuserArgs.from_cli_args(args) ++ engine_config, input_config = engine_args.create_config() + + local_rank = get_world_group().local_rank + pipe = StableDiffusion3Pipeline.from_pretrained( + pretrained_model_name_or_path=engine_config.model_config.model, + torch_dtype=torch.float16, + ).to(f"cuda:{local_rank}") + +# 在这里对管道进行任何操作 + ++ pipe = xDiTParallel(pipe, engine_config, input_config) + + pipe( + height=input_config.height, + width=input_config.height, + prompt=input_config.prompt, + num_inference_steps=input_config.num_inference_steps, + output_type=input_config.output_type, + generator=torch.Generator(device="cuda").manual_seed(input_config.seed), + ) + ++ if input_config.output_type == "pil": ++ pipe.save("results", "stable_diffusion_3") + +if __name__ == "__main__": + main() +``` + +如您所见,我们只需要使用 xDiT 中的 xFuserArgs 来获取配置参数,并将这些参数与来自 Diffusers 库的管道对象一起传递给 xDiTParallel,即可完成对 Diffusers 中特定管道的并行化。 + +xDiT 运行时参数可以在命令行中使用 `-h` 查看,您可以参考此[使用](https://github.com/xdit-project/xDiT?tab=readme-ov-file#2-usage)示例以获取更多详细信息。 +ils。 + +xDiT 需要使用 torchrun 启动,以支持其多节点、多 GPU 并行能力。例如,以下命令可用于 8-GPU 并行推理: + +```bash +torchrun --nproc_per_node=8 ./inference.py --model models/FLUX.1-dev --data_parallel_degree 2 --ulysses_degree 2 --ring_degree 2 --prompt "A snowy mountain" "A small dog" --num_inference_steps 50 +``` + +## 支持的模型 + +在 xDiT 中支持 Diffusers 模型的一个子集,例如 Flux.1、Stable Diffusion 3 等。最新支持的模型可以在[这里](https://github.com/xdit-project/xDiT?tab=readme-ov-file#-supported-dits)找到。 + +## 基准测试 +我们在不同机器上测试了各种模型,以下是一些基准数据。 + +### Flux.1-schnell +
+ +
+ +
+ +
+ +### Stable Diffusion 3 +
+ +
+ +
+ +
+ +### HunyuanDiT +
+ +
+ +
+ +
+ +
+ +
+ +更详细的性能指标可以在我们的 [GitHub 页面](https://github.com/xdit-project/xDiT?tab=readme-ov-file#perf) 上找到。 + +## 参考文献 + +[xDiT-project](https://github.com/xdit-project/xDiT) + +[USP: A Unified Sequence Parallelism Approach for Long Context Generative AI](https://huggingface.co/papers/2405.07719) + +[PipeFusion: Displaced Patch Pipeline Parallelism for Inference of Diffusion Transformer Models](https://huggingface.co/papers/2405.14430) \ No newline at end of file diff --git a/docs/source/zh/training/distributed_inference.md b/docs/source/zh/training/distributed_inference.md new file mode 100644 index 0000000000..ec35b5e730 --- /dev/null +++ b/docs/source/zh/training/distributed_inference.md @@ -0,0 +1,239 @@ + + +# 分布式推理 + +在分布式设置中,您可以使用 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) 或 [PyTorch Distributed](https://pytorch.org/tutorials/beginner/dist_overview.html) 在多个 GPU 上运行推理,这对于并行生成多个提示非常有用。 + +本指南将向您展示如何使用 🤗 Accelerate 和 PyTorch Distributed 进行分布式推理。 + +## 🤗 Accelerate + +🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) 是一个旨在简化在分布式设置中训练或运行推理的库。它简化了设置分布式环境的过程,让您可以专注于您的 PyTorch 代码。 + +首先,创建一个 Python 文件并初始化一个 [`accelerate.PartialState`] 来创建分布式环境;您的设置会自动检测,因此您无需明确定义 `rank` 或 `world_size`。将 [`DiffusionPipeline`] 移动到 `distributed_state.device` 以为每个进程分配一个 GPU。 + +现在使用 [`~accelerate.PartialState.split_between_processes`] 实用程序作为上下文管理器,自动在进程数之间分发提示。 + +```py +import torch +from accelerate import PartialState +from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True +) +distributed_state = PartialState() +pipeline.to(distributed_state.device) + +with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt: + result = pipeline(prompt).images[0] + result.save(f"result_{distributed_state.process_index}.png") +``` + +使用 `--num_processes` 参数指定要使用的 GPU 数量,并调用 `accelerate launch` 来运行脚本: + +```bash +accelerate launch run_distributed.py --num_processes=2 +``` + + + +参考这个最小示例 [脚本](https://gist.github.com/sayakpaul/cfaebd221820d7b43fae638b4dfa01ba) 以在多个 GPU 上运行推理。要了解更多信息,请查看 [使用 🤗 Accelerate 进行分布式推理](https://huggingface.co/docs/accelerate/en/usage_guides/distributed_inference#distributed-inference-with-accelerate) 指南。 + + + +## PyTorch Distributed + +PyTorch 支持 [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html),它启用了数据 +并行性。 + +首先,创建一个 Python 文件并导入 `torch.distributed` 和 `torch.multiprocessing` 来设置分布式进程组,并为每个 GPU 上的推理生成进程。您还应该初始化一个 [`DiffusionPipeline`]: + +```py +import torch +import torch.distributed as dist +import torch.multiprocessing as mp + +from diffusers import DiffusionPipeline + +sd = DiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True +) +``` + +您需要创建一个函数来运行推理;[`init_process_group`](https://pytorch.org/docs/stable/distributed.html?highlight=init_process_group#torch.distributed.init_process_group) 处理创建一个分布式环境,指定要使用的后端类型、当前进程的 `rank` 以及参与进程的数量 `world_size`。如果您在 2 个 GPU 上并行运行推理,那么 `world_size` 就是 2。 + +将 [`DiffusionPipeline`] 移动到 `rank`,并使用 `get_rank` 为每个进程分配一个 GPU,其中每个进程处理不同的提示: + +```py +def run_inference(rank, world_size): + dist.init_process_group("nccl", rank=rank, world_size=world_size) + + sd.to(rank) + + if torch.distributed.get_rank() == 0: + prompt = "a dog" + elif torch.distributed.get_rank() == 1: + prompt = "a cat" + + image = sd(prompt).images[0] + 
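    # 每个进程保存自己生成的图像,文件名由对应的提示词派生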
image.save(f"./{'_'.join(prompt)}.png") +``` + +要运行分布式推理,调用 [`mp.spawn`](https://pytorch.org/docs/stable/multiprocessing.html#torch.multiprocessing.spawn) 在 `world_size` 定义的 GPU 数量上运行 `run_inference` 函数: + +```py +def main(): + world_size = 2 + mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True) + + +if __name__ == "__main__": + main() +``` + +完成推理脚本后,使用 `--nproc_per_node` 参数指定要使用的 GPU 数量,并调用 `torchrun` 来运行脚本: + +```bash +torchrun run_distributed.py --nproc_per_node=2 +``` + +> [!TIP] +> 您可以在 [`DiffusionPipeline`] 中使用 `device_map` 将其模型级组件分布在多个设备上。请参考 [设备放置](../tutorials/inference_with_big_models#device-placement) 指南了解更多信息。 + +## 模型分片 + +现代扩散系统,如 [Flux](../api/pipelines/flux),非常大且包含多个模型。例如,[Flux.1-Dev](https://hf.co/black-forest-labs/FLUX.1-dev) 由两个文本编码器 - [T5-XXL](https://hf.co/google/t5-v1_1-xxl) 和 [CLIP-L](https://hf.co/openai/clip-vit-large-patch14) - 一个 [扩散变换器](../api/models/flux_transformer),以及一个 [VAE](../api/models/autoencoderkl) 组成。对于如此大的模型,在消费级 GPU 上运行推理可能具有挑战性。 + +模型分片是一种技术,当模型无法容纳在单个 GPU 上时,将模型分布在多个 GPU 上。下面的示例假设有两个 16GB GPU 可用于推理。 + +开始使用文本编码器计算文本嵌入。通过设置 `device_map="balanced"` 将文本编码器保持在两个GPU上。`balanced` 策略将模型均匀分布在所有可用GPU上。使用 `max_memory` 参数为每个GPU上的每个文本编码器分配最大内存量。 + +> [!TIP] +> **仅** 在此步骤加载文本编码器!扩散变换器和VAE在后续步骤中加载以节省内存。 + +```py +from diffusers import FluxPipeline +import torch + +prompt = "a photo of a dog with cat-like look" + +pipeline = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + transformer=None, + vae=None, + device_map="balanced", + max_memory={0: "16GB", 1: "16GB"}, + torch_dtype=torch.bfloat16 +) +with torch.no_grad(): + print("Encoding prompts.") + prompt_embeds, pooled_prompt_embeds, text_ids = pipeline.encode_prompt( + prompt=prompt, prompt_2=None, max_sequence_length=512 + ) +``` + +一旦文本嵌入计算完成,从GPU中移除它们以为扩散变换器腾出空间。 + +```py +import gc + +def flush(): + gc.collect() + torch.cuda.empty_cache() + torch.cuda.reset_max_memory_allocated() + torch.cuda.reset_peak_memory_stats() + +del pipeline.text_encoder +del pipeline.text_encoder_2 +del pipeline.tokenizer +del pipeline.tokenizer_2 +del pipeline + +flush() +``` + +接下来加载扩散变换器,它有125亿参数。这次,设置 `device_map="auto"` 以自动将模型分布在两个16GB GPU上。`auto` 策略由 [Accelerate](https://hf.co/docs/accelerate/index) 支持,并作为 [大模型推理](https://hf.co/docs/accelerate/concept_guides/big_model_inference) 功能的一部分可用。它首先将模型分布在最快的设备(GPU)上,然后在需要时移动到较慢的设备如CPU和硬盘。将模型参数存储在较慢设备上的权衡是推理延迟较慢。 + +```py +from diffusers import AutoModel +import torch + +transformer = AutoModel.from_pretrained( + "black-forest-labs/FLUX.1-dev", + subfolder="transformer", + device_map="auto", + torch_dtype=torch.bfloat16 +) +``` + +> [!TIP] +> 在任何时候,您可以尝试 `print(pipeline.hf_device_map)` 来查看各种模型如何在设备上分布。这对于跟踪模型的设备放置很有用。您也可以尝试 `print(transformer.hf_device_map)` 来查看变换器模型如何在设备上分片。 + +将变换器模型添加到管道中以进行去噪,但将其他模型级组件如文本编码器和VAE设置为 `None`,因为您还不需要它们。 + +```py +pipeline = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + text_encoder=None, + text_encoder_2=None, + tokenizer=None, + tokenizer_2=None, + vae=None, + transformer=transformer, + torch_dtype=torch.bfloat16 +) + +print("Running denoising.") +height, width = 768, 1360 +latents = pipeline( + + +prompt_embeds=prompt_embeds, +pooled_prompt_embeds=pooled_prompt_embeds, +num_inference_steps=50, +guidance_scale=3.5, +height=height, +width=width, +output_type="latent", +).images +``` + +从内存中移除管道和变换器,因为它们不再需要。 + +```py +del pipeline.transformer +del pipeline + +flush() +``` + +最后,使用变分自编码器(VAE)将潜在表示解码为图像。VAE通常足够小,可以在单个GPU上加载。 + +```py +from diffusers import AutoencoderKL +from 
diffusers.image_processor import VaeImageProcessor +import torch + +vae = AutoencoderKL.from_pretrained(ckpt_id, subfolder="vae", torch_dtype=torch.bfloat16).to("cuda") +vae_scale_factor = 2 ** (len(vae.config.block_out_channels)) +image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor) + +with torch.no_grad(): + print("运行解码中。") + latents = FluxPipeline._unpack_latents(latents, height, width, vae_scale_factor) + latents = (latents / vae.config.scaling_factor) + vae.config.shift_factor + + image = vae.decode(latents, return_dict=False)[0] + image = image_processor.postprocess(image, output_type="pil") + image[0].save("split_transformer.png") +``` + +通过选择性加载和卸载在特定阶段所需的模型,并将最大模型分片到多个GPU上,可以在消费级GPU上运行大型模型的推理。 \ No newline at end of file diff --git a/docs/source/zh/training/dreambooth.md b/docs/source/zh/training/dreambooth.md new file mode 100644 index 0000000000..493c5385ff --- /dev/null +++ b/docs/source/zh/training/dreambooth.md @@ -0,0 +1,643 @@ + + +# DreamBooth + +[DreamBooth](https://huggingface.co/papers/2208.12242) 是一种训练技术,通过仅训练少数主题或风格的图像来更新整个扩散模型。它通过在提示中关联一个特殊词与示例图像来工作。 + +如果您在 vRAM 有限的 GPU 上训练,应尝试在训练命令中启用 `gradient_checkpointing` 和 `mixed_precision` 参数。您还可以通过使用 [xFormers](../optimization/xformers) 的内存高效注意力来减少内存占用。JAX/Flax 训练也支持在 TPU 和 GPU 上进行高效训练,但不支持梯度检查点或 xFormers。如果您想使用 Flax 更快地训练,应拥有内存 >30GB 的 GPU。 + +本指南将探索 [train_dreambooth.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) 脚本,帮助您更熟悉它,以及如何根据您的用例进行适配。 + +在运行脚本之前,请确保从源代码安装库: + +```bash +git clone https://github.com/huggingface/diffusers +cd diffusers +pip install . +``` + +导航到包含训练脚本的示例文件夹,并安装脚本所需的依赖项: + + + + +```bash +cd examples/dreambooth +pip install -r requirements.txt +``` + + + + +```bash +cd examples/dreambooth +pip install -r requirements_flax.txt +``` + + + + + + +🤗 Accelerate 是一个库,用于帮助您在多个 GPU/TPU 上或使用混合精度进行训练。它会根据您的硬件和环境自动配置训练设置。查看 🤗 Accelerate [快速入门](https://huggingface.co/docs/accelerate/quicktour) 以了解更多信息。 + + + +初始化 🤗 Accelerate 环境: + +```bash +accelerate config +``` + +要设置默认的 🤗 Accelerate 环境而不选择任何配置: + +```bash +accelerate config default +``` + +或者,如果您的环境不支持交互式 shell,例如笔记本,您可以使用: + +```py +from accelerate.utils import write_basic_config + +write_basic_config() +``` + +最后,如果您想在自己的数据集上训练模型,请查看 [创建用于训练的数据集](create_dataset) 指南,了解如何创建与 +训练脚本。 + + + +以下部分重点介绍了训练脚本中对于理解如何修改它很重要的部分,但并未详细涵盖脚本的每个方面。如果您有兴趣了解更多,请随时阅读[脚本](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py),并告诉我们如果您有任何问题或疑虑。 + + + +## 脚本参数 + + + +DreamBooth 对训练超参数非常敏感,容易过拟合。阅读 [使用 🧨 Diffusers 训练 Stable Diffusion 与 Dreambooth](https://huggingface.co/blog/dreambooth) 博客文章,了解针对不同主题的推荐设置,以帮助您选择合适的超参数。 + + + +训练脚本提供了许多参数来自定义您的训练运行。所有参数及其描述都可以在 [`parse_args()`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L228) 函数中找到。参数设置了默认值,这些默认值应该开箱即用效果不错,但如果您愿意,也可以在训练命令中设置自己的值。 + +例如,要以 bf16 格式进行训练: + +```bash +accelerate launch train_dreambooth.py \ + --mixed_precision="bf16" +``` + +一些基本且重要的参数需要了解和指定: + +- `--pretrained_model_name_or_path`: Hub 上的模型名称或预训练模型的本地路径 +- `--instance_data_dir`: 包含训练数据集(示例图像)的文件夹路径 +- `--instance_prompt`: 包含示例图像特殊单词的文本提示 +- `--train_text_encoder`: 是否也训练文本编码器 +- `--output_dir`: 保存训练后模型的位置 +- `--push_to_hub`: 是否将训练后的模型推送到 Hub +- `--checkpointing_steps`: 模型训练时保存检查点的频率;这在训练因某种原因中断时很有用,您可以通过在训练命令中添加 `--resume_from_checkpoint` 来从该检查点继续训练 + +### Min-SNR 加权 + +[Min-SNR](https://huggingface.co/papers/2303.09556) 加权策略可以通过重新平衡损失来帮助训练,以实现更快的收敛。训练脚本支持预测 `epsilon`(噪声)或 `v_prediction`,但 
Min-SNR 与两种预测类型都兼容。此加权策略仅由 PyTorch 支持,在 Flax 训练脚本中不可用。 + +添加 `--snr_gamma` 参数并将其设置为推荐值 5.0: + +```bash +accelerate launch train_dreambooth.py \ + --snr_gamma=5.0 +``` + +### 先验保持损失 + +先验保持损失是一种使用模型自身生成的样本来帮助它学习如何生成更多样化图像的方法。因为这些生成的样本图像属于您提供的图像相同的类别,它们帮助模型 r +etain 它已经学到的关于类别的知识,以及它如何利用已经了解的类别信息来创建新的组合。 + +- `--with_prior_preservation`: 是否使用先验保留损失 +- `--prior_loss_weight`: 控制先验保留损失对模型的影响程度 +- `--class_data_dir`: 包含生成的类别样本图像的文件夹路径 +- `--class_prompt`: 描述生成的样本图像类别的文本提示 + +```bash +accelerate launch train_dreambooth.py \ + --with_prior_preservation \ + --prior_loss_weight=1.0 \ + --class_data_dir="path/to/class/images" \ + --class_prompt="text prompt describing class" +``` + +### 训练文本编码器 + +为了提高生成输出的质量,除了 UNet 之外,您还可以训练文本编码器。这需要额外的内存,并且您需要一个至少有 24GB 显存的 GPU。如果您拥有必要的硬件,那么训练文本编码器会产生更好的结果,尤其是在生成面部图像时。通过以下方式启用此选项: + +```bash +accelerate launch train_dreambooth.py \ + --train_text_encoder +``` + +## 训练脚本 + +DreamBooth 附带了自己的数据集类: + +- [`DreamBoothDataset`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L604): 预处理图像和类别图像,并对提示进行分词以用于训练 +- [`PromptDataset`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L738): 生成提示嵌入以生成类别图像 + +如果您启用了[先验保留损失](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L842),类别图像在此处生成: + +```py +sample_dataset = PromptDataset(args.class_prompt, num_new_images) +sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) + +sample_dataloader = accelerator.prepare(sample_dataloader) +pipeline.to(accelerator.device) + +for example in tqdm( + sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process +): + images = pipeline(example["prompt"]).images +``` + +接下来是 [`main()`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L799) 函数,它处理设置训练数据集和训练循环本身。脚本加载 [tokenizer](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L898)、[scheduler 和 models](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L912C1-L912C1): + +```py +# Load the tokenizer +if args.tokenizer_name: + tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) +elif args.pretrained_model_name_or_path: + tokenizer = AutoTokenizer.from_pretrained( + args.pretrained_model_name_or_path, + subfolder="tokenizer", + revision=args.revision, + use_fast=False, + ) + +# 加载调度器和模型 +noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") +text_encoder = text_encoder_cls.from_pretrained( + args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision +) + +if model_has_vae(args): + vae = AutoencoderKL.from_pretrained( + args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision + ) +else: + vae = None + +unet = UNet2DConditionModel.from_pretrained( + args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision +) +``` + +然后,是时候[创建训练数据集](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L1073)和从`DreamBoothDataset`创建DataLoader: + +```py +train_dataset = DreamBoothDataset( + 
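    # 实例图像目录与实例提示:将特殊词(例如 "sks")与示例图像绑定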
instance_data_root=args.instance_data_dir, + instance_prompt=args.instance_prompt, + class_data_root=args.class_data_dir if args.with_prior_preservation else None, + class_prompt=args.class_prompt, + class_num=args.num_class_images, + tokenizer=tokenizer, + size=args.resolution, + center_crop=args.center_crop, + encoder_hidden_states=pre_computed_encoder_hidden_states, + class_prompt_encoder_hidden_states=pre_computed_class_prompt_encoder_hidden_states, + tokenizer_max_length=args.tokenizer_max_length, +) + +train_dataloader = torch.utils.data.DataLoader( + train_dataset, + batch_size=args.train_batch_size, + shuffle=True, + collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), + num_workers=args.dataloader_num_workers, +) +``` + +最后,[训练循环](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L1151)处理剩余步骤,例如将图像转换为潜在空间、向输入添加噪声、预测噪声残差和计算损失。 + +如果您想了解更多关于训练循环的工作原理,请查看[理解管道、模型和调度器](../using-diffusers/write_own_pipeline)教程,该教程分解了去噪过程的基本模式。 + +## 启动脚本 + +您现在准备好启动训练脚本了!🚀 + +对于本指南,您将下载一些[狗的图片](https://huggingface.co/datasets/diffusers/dog-example)的图像并将它们存储在一个目录中。但请记住,您可以根据需要创建和使用自己的数据集(请参阅[创建用于训练的数据集](create_dataset)指南)。 + +```py +from huggingface_hub import snapshot_download + +local_dir = "./dog" +snapshot_download( + "diffusers/dog-example", + local_dir=local_dir, + repo_type="dataset", + ignore_patterns=".gitattributes", +) +``` + +设置环境变量 `MODEL_NAME` 为 Hub 上的模型 ID 或本地模型路径,`INSTANCE_DIR` 为您刚刚下载狗图像的路径,`OUTPUT_DIR` 为您想保存模型的位置。您将使用 `sks` 作为特殊词来绑定训练。 + +如果您有兴趣跟随训练过程,可以定期保存生成的图像作为训练进度。将以下参数添加到训练命令中: + +```bash +--validation_prompt="a photo of a sks dog" +--num_validation_images=4 +--validation_steps=100 +``` + +在启动脚本之前,还有一件事!根据您拥有的 GPU,您可能需要启用某些优化来训练 DreamBooth。 + + + + +在 16GB GPU 上,您可以使用 bitsandbytes 8 位优化器和梯度检查点来帮助训练 DreamBooth 模型。安装 bitsandbytes: + +```py +pip install bitsandbytes +``` + +然后,将以下参数添加到您的训练命令中: + +```bash +accelerate launch train_dreambooth.py \ + --gradient_checkpointing \ + --use_8bit_adam \ +``` + + + + +在 12GB GPU 上,您需要 bitsandbytes 8 位优化器、梯度检查点、xFormers,并将梯度设置为 `None` 而不是零以减少内存使用。 + +```bash +accelerate launch train_dreambooth.py \ + --use_8bit_adam \ + --gradient_checkpointing \ + --enable_xformers_memory_efficient_attention \ + --set_grads_to_none \ +``` + + + + +在 8GB GPU 上,您需要 [DeepSpeed](https://www.deepspeed.ai/) 将一些张量从 vRAM 卸载到 CPU 或 NVME,以便在更少的 GPU 内存下进行训练。 + +运行以下命令来配置您的 🤗 Accelerate 环境: + +```bash +accelerate config +``` + +在配置过程中,确认您想使用 DeepSpeed。现在,通过结合 DeepSpeed 阶段 2、fp16 混合精度以及将模型参数和优化器状态卸载到 CPU,应该可以在低于 8GB vRAM 的情况下进行训练。缺点是这需要更多的系统 RAM(约 25 GB)。有关更多配置选项,请参阅 [DeepSpeed 文档](https://huggingface.co/docs/accelerate/usage_guides/deepspeed)。 + +您还应将默认的 Adam 优化器更改为 DeepSpeed 的优化版本 [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu) 以获得显著的速度提升。启用 `DeepSpeedCPUAdam` 要求您的系统 CUDA 工具链版本与 PyTorch 安装的版本相同。 + +目前,bitsandbytes 8 位优化器似乎与 DeepSpeed 不兼容。 + +就是这样!您不需要向训练命令添加任何额外参数。 + + + + + + + +```bash +export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5" +export INSTANCE_DIR="./dog" +export OUTPUT_DIR="path_to_ +saved_model" + +accelerate launch train_dreambooth.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --instance_data_dir=$INSTANCE_DIR \ + --output_dir=$OUTPUT_DIR \ + --instance_prompt="a photo of sks dog" \ + --resolution=512 \ + --train_batch_size=1 \ + --gradient_accumulation_steps=1 \ + --learning_rate=5e-6 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --max_train_steps=400 \ + 
--push_to_hub +``` + + + + +```bash +export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" +export INSTANCE_DIR="./dog" +export OUTPUT_DIR="path-to-save-model" + +python train_dreambooth_flax.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --instance_data_dir=$INSTANCE_DIR \ + --output_dir=$OUTPUT_DIR \ + --instance_prompt="a photo of sks dog" \ + --resolution=512 \ + --train_batch_size=1 \ + --learning_rate=5e-6 \ + --max_train_steps=400 \ + --push_to_hub +``` + + + + +训练完成后,您可以使用新训练的模型进行推理! + + + +等不及在训练完成前就尝试您的模型进行推理?🤭 请确保安装了最新版本的 🤗 Accelerate。 + +```py +from diffusers import DiffusionPipeline, UNet2DConditionModel +from transformers import CLIPTextModel +import torch + +unet = UNet2DConditionModel.from_pretrained("path/to/model/checkpoint-100/unet") + +# 如果您使用了 `--args.train_text_encoder` 进行训练,请确保也加载文本编码器 +text_encoder = CLIPTextModel.from_pretrained("path/to/model/checkpoint-100/checkpoint-100/text_encoder") + +pipeline = DiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", unet=unet, text_encoder=text_encoder, dtype=torch.float16, +).to("cuda") + +image = pipeline("A photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0] +image.save("dog-bucket.png") +``` + + + + + + +```py +from diffusers import DiffusionPipeline +import torch + +pipeline = DiffusionPipeline.from_pretrained("path_to_saved_model", torch_dtype=torch.float16, use_safetensors=True).to("cuda") +image = pipeline("A photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0] +image.save("dog-bucket.png") +``` + + + + +```py +import jax +import numpy as np +from flax.jax_utils import replicate +from flax.training.common_utils import shard +from diffusers import FlaxStableDiffusionPipeline + +pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path-to-your-trained-model", dtype=jax.numpy.bfloat16) + +prompt = "A photo of sks dog in a bucket" +prng_seed = jax.random.PRNGKey(0) +num_inference_steps = 50 + +num_samples = jax.device_count() +prompt = num_samples * [prompt] +prompt_ids = pipeline.prepare_inputs(prompt) + +# 分片输入和随机数生成器 +params = replicate(params) +prng_seed = jax.random.split(prng_seed, jax.device_count()) +prompt_ids = shard(prompt_ids) + +images = pipeline(prompt_ids, params, prng_seed, num_inference_ +steps, jit=True).images +images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) +image.save("dog-bucket.png") +``` + + + + +## LoRA + +LoRA 是一种训练技术,可显著减少可训练参数的数量。因此,训练速度更快,并且更容易存储生成的权重,因为它们小得多(约 100MB)。使用 [train_dreambooth_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py) 脚本通过 LoRA 进行训练。 + +LoRA 训练脚本在 [LoRA 训练](lora) 指南中有更详细的讨论。 + +## Stable Diffusion XL + +Stable Diffusion XL (SDXL) 是一个强大的文本到图像模型,可生成高分辨率图像,并在其架构中添加了第二个文本编码器。使用 [train_dreambooth_lora_sdxl.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora_sdxl.py) 脚本通过 LoRA 训练 SDXL 模型。 + +SDXL 训练脚本在 [SDXL 训练](sdxl) 指南中有更详细的讨论。 + +## DeepFloyd IF + +DeepFloyd IF 是一个级联像素扩散模型,包含三个阶段。第一阶段生成基础图像,第二和第三阶段逐步将基础图像放大为高分辨率 1024x1024 图像。使用 [train_dreambooth_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py) 或 [train_dreambooth.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) 脚本通过 LoRA 或完整模型训练 DeepFloyd IF 模型。 + +DeepFloyd IF 使用预测方差,但 Diffusers 训练脚本使用预测误差,因此训练的 DeepFloyd IF 模型被切换到固定方差调度。训练脚本将为您更新完全训练模型的调度器配置。但是,当您加载保存的 LoRA 
权重时,还必须更新管道的调度器配置。 + +```py +from diffusers import DiffusionPipeline + +pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", use_safetensors=True) + +pipe.load_lora_weights("") + +# 更新调度器配置为固定方差调度 +pipe.scheduler = pipe.scheduler.__class__.from_config(pipe.scheduler.config, variance_type="fixed_small") +``` + +第二阶段模型需要额外的验证图像进行放大。您可以下载并使用训练图像的缩小版本。 + +```py +from huggingface_hub import snapshot_download + +local_dir = "./dog_downsized" +snapshot_download( + "diffusers/dog-example-downsized", + local_dir=local_dir, + repo_type="dataset", + ignore_patterns=".gitattributes", +) +``` + +以下代码示例简要概述了如何结合 DreamBooth 和 LoRA 训练 DeepFloyd IF 模型。一些需要注意的重要参数包括: + +* `--resolution=64`,需要更小的分辨率,因为 DeepFloyd IF 是 +一个像素扩散模型,用于处理未压缩的像素,输入图像必须更小 +* `--pre_compute_text_embeddings`,提前计算文本嵌入以节省内存,因为 [`~transformers.T5Model`] 可能占用大量内存 +* `--tokenizer_max_length=77`,您可以使用更长的默认文本长度与 T5 作为文本编码器,但默认模型编码过程使用较短的文本长度 +* `--text_encoder_use_attention_mask`,将注意力掩码传递给文本编码器 + + + + +使用 LoRA 和 DreamBooth 训练 DeepFloyd IF 的第 1 阶段需要约 28GB 内存。 + +```bash +export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0" +export INSTANCE_DIR="dog" +export OUTPUT_DIR="dreambooth_dog_lora" + +accelerate launch train_dreambooth_lora.py \ + --report_to wandb \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --instance_data_dir=$INSTANCE_DIR \ + --output_dir=$OUTPUT_DIR \ + --instance_prompt="a sks dog" \ + --resolution=64 \ + --train_batch_size=4 \ + --gradient_accumulation_steps=1 \ + --learning_rate=5e-6 \ + --scale_lr \ + --max_train_steps=1200 \ + --validation_prompt="a sks dog" \ + --validation_epochs=25 \ + --checkpointing_steps=100 \ + --pre_compute_text_embeddings \ + --tokenizer_max_length=77 \ + --text_encoder_use_attention_mask +``` + + + + +对于使用 LoRA 和 DreamBooth 的 DeepFloyd IF 第 2 阶段,请注意这些参数: + +* `--validation_images`,验证期间用于上采样的图像 +* `--class_labels_conditioning=timesteps`,根据需要额外条件化 UNet,如第 2 阶段中所需 +* `--learning_rate=1e-6`,与第 1 阶段相比使用较低的学习率 +* `--resolution=256`,上采样器的预期分辨率 + +```bash +export MODEL_NAME="DeepFloyd/IF-II-L-v1.0" +export INSTANCE_DIR="dog" +export OUTPUT_DIR="dreambooth_dog_upscale" +export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png" + +python train_dreambooth_lora.py \ + --report_to wandb \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --instance_data_dir=$INSTANCE_DIR \ + --output_dir=$OUTPUT_DIR \ + --instance_prompt="a sks dog" \ + --resolution=256 \ + --train_batch_size=4 \ + --gradient_accumulation_steps=1 \ + --learning_rate=1e-6 \ + --max_train_steps=2000 \ + --validation_prompt="a sks dog" \ + --validation_epochs=100 \ + --checkpointing_steps=500 \ + --pre_compute_text_embeddings \ + --tokenizer_max_length=77 \ + --text_encoder_use_attention_mask \ + --validation_images $VALIDATION_IMAGES \ + --class_labels_conditioning=timesteps +``` + + + + +对于使用 DreamBooth 的 DeepFloyd IF 第 1 阶段,请注意这些参数: + +* `--skip_save_text_encoder`,跳过保存完整 T5 文本编码器与微调模型 +* `--use_8bit_adam`,使用 8 位 Adam 优化器以节省内存,因为 + +优化器状态的大小在训练完整模型时 +* `--learning_rate=1e-7`,对于完整模型训练应使用非常低的学习率,否则模型质量会下降(您可以使用更高的学习率和更大的批次大小) + +使用8位Adam和批次大小为4进行训练,完整模型可以在约48GB内存下训练。 + +```bash +export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0" +export INSTANCE_DIR="dog" +export OUTPUT_DIR="dreambooth_if" + +accelerate launch train_dreambooth.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --instance_data_dir=$INSTANCE_DIR \ + --output_dir=$OUTPUT_DIR \ + --instance_prompt="a photo of sks dog" \ + --resolution=64 \ + --train_batch_size=4 \ + --gradient_accumulation_steps=1 \ + 
--learning_rate=1e-7 \ + --max_train_steps=150 \ + --validation_prompt "a photo of sks dog" \ + --validation_steps 25 \ + --text_encoder_use_attention_mask \ + --tokenizer_max_length 77 \ + --pre_compute_text_embeddings \ + --use_8bit_adam \ + --set_grads_to_none \ + --skip_save_text_encoder \ + --push_to_hub +``` + + + + +对于DeepFloyd IF的第二阶段DreamBooth,请注意这些参数: + +* `--learning_rate=5e-6`,使用较低的学习率和较小的有效批次大小 +* `--resolution=256`,上采样器的预期分辨率 +* `--train_batch_size=2` 和 `--gradient_accumulation_steps=6`,为了有效训练包含面部的图像,需要更大的批次大小 + +```bash +export MODEL_NAME="DeepFloyd/IF-II-L-v1.0" +export INSTANCE_DIR="dog" +export OUTPUT_DIR="dreambooth_dog_upscale" +export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png" + +accelerate launch train_dreambooth.py \ + --report_to wandb \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --instance_data_dir=$INSTANCE_DIR \ + --output_dir=$OUTPUT_DIR \ + --instance_prompt="a sks dog" \ + --resolution=256 \ + --train_batch_size=2 \ + --gradient_accumulation_steps=6 \ + --learning_rate=5e-6 \ + --max_train_steps=2000 \ + --validation_prompt="a sks dog" \ + --validation_steps=150 \ + --checkpointing_steps=500 \ + --pre_compute_text_embeddings \ + --tokenizer_max_length=77 \ + --text_encoder_use_attention_mask \ + --validation_images $VALIDATION_IMAGES \ + --class_labels_conditioning timesteps \ + --push_to_hub +``` + + + + +### 训练技巧 + +训练DeepFloyd IF模型可能具有挑战性,但以下是我们发现有用的技巧: + +- LoRA对于训练第一阶段模型已足够,因为模型的低分辨率使得表示更精细的细节变得困难,无论如何。 +- 对于常见或简单的对象,您不一定需要微调上采样器。确保传递给上采样器的提示被调整以移除实例提示中的新令牌。例如,如果您第一阶段提示是"a sks dog",那么您第二阶段的提示应该是"a dog"。 +- 对于更精细的细节,如面部,完全训练 +使用阶段2上采样器比使用LoRA训练阶段2模型更好。使用更大的批次大小和较低的学习率也有帮助。 +- 应使用较低的学习率来训练阶段2模型。 +- [`DDPMScheduler`] 比训练脚本中使用的DPMSolver效果更好。 + +## 下一步 + +恭喜您训练了您的DreamBooth模型!要了解更多关于如何使用您的新模型的信息,以下指南可能有所帮助: +- 如果您使用LoRA训练了您的模型,请学习如何[加载DreamBooth](../using-diffusers/loading_adapters)模型进行推理。 \ No newline at end of file diff --git a/docs/source/zh/training/instructpix2pix.md b/docs/source/zh/training/instructpix2pix.md new file mode 100644 index 0000000000..b1b616366a --- /dev/null +++ b/docs/source/zh/training/instructpix2pix.md @@ -0,0 +1,255 @@ + + +# InstructPix2Pix + +[InstructPix2Pix](https://hf.co/papers/2211.09800) 是一个基于 Stable Diffusion 训练的模型,用于根据人类提供的指令编辑图像。例如,您的提示可以是“将云变成雨天”,模型将相应编辑输入图像。该模型以文本提示(或编辑指令)和输入图像为条件。 + +本指南将探索 [train_instruct_pix2pix.py](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) 训练脚本,帮助您熟悉它,以及如何将其适应您自己的用例。 + +在运行脚本之前,请确保从源代码安装库: + +```bash +git clone https://github.com/huggingface/diffusers +cd diffusers +pip install . 
+``` + +然后导航到包含训练脚本的示例文件夹,并安装脚本所需的依赖项: + +```bash +cd examples/instruct_pix2pix +pip install -r requirements.txt +``` + + + +🤗 Accelerate 是一个库,用于帮助您在多个 GPU/TPU 上或使用混合精度进行训练。它将根据您的硬件和环境自动配置训练设置。查看 🤗 Accelerate [快速导览](https://huggingface.co/docs/accelerate/quicktour) 以了解更多信息。 + + + +初始化一个 🤗 Accelerate 环境: + +```bash +accelerate config +``` + +要设置一个默认的 🤗 Accelerate 环境,无需选择任何配置: + +```bash +accelerate config default +``` + +或者,如果您的环境不支持交互式 shell,例如笔记本,您可以使用: + +```py +from accelerate.utils import write_basic_config + +write_basic_config() +``` + +最后,如果您想在自己的数据集上训练模型,请查看 [创建用于训练的数据集](create_dataset) 指南,了解如何创建与训练脚本兼容的数据集。 + + + +以下部分重点介绍了训练脚本中对于理解如何修改它很重要的部分,但并未详细涵盖脚本的每个方面。如果您有兴趣了解更多,请随时阅读 [脚本](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py),并告诉我们如果您有任何问题或疑虑。 + + + +## 脚本参数 + +训练脚本有许多参数可帮助您自定义训练运行。所有 +参数及其描述可在 [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L65) 函数中找到。大多数参数都提供了默认值,这些值效果相当不错,但如果您愿意,也可以在训练命令中设置自己的值。 + +例如,要增加输入图像的分辨率: + +```bash +accelerate launch train_instruct_pix2pix.py \ + --resolution=512 \ +``` + +许多基本和重要的参数在 [文本到图像](text2image#script-parameters) 训练指南中已有描述,因此本指南仅关注与 InstructPix2Pix 相关的参数: + +- `--original_image_column`:编辑前的原始图像 +- `--edited_image_column`:编辑后的图像 +- `--edit_prompt_column`:编辑图像的指令 +- `--conditioning_dropout_prob`:训练期间编辑图像和编辑提示的 dropout 概率,这为一种或两种条件输入启用了无分类器引导(CFG) + +## 训练脚本 + +数据集预处理代码和训练循环可在 [`main()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L374) 函数中找到。这是您将修改训练脚本以适应自己用例的地方。 + +与脚本参数类似,[文本到图像](text2image#training-script) 训练指南提供了训练脚本的逐步说明。相反,本指南将查看脚本中与 InstructPix2Pix 相关的部分。 + +脚本首先修改 UNet 的第一个卷积层中的 [输入通道数](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L445),以适应 InstructPix2Pix 的额外条件图像: + +```py +in_channels = 8 +out_channels = unet.conv_in.out_channels +unet.register_to_config(in_channels=in_channels) + +with torch.no_grad(): + new_conv_in = nn.Conv2d( + in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding + ) + new_conv_in.weight.zero_() + new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight) + unet.conv_in = new_conv_in +``` + +这些 UNet 参数由优化器 [更新](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L545C1-L551C6): + +```py +optimizer = optimizer_cls( + unet.parameters(), + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, +) +``` + +接下来,编辑后的图像和编辑指令被 [预处理](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L624)并被[tokenized](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L610C24-L610C24)。重要的是,对原始图像和编辑后的图像应用相同的图像变换。 + +```py +def preprocess_train(examples): + preprocessed_images = preprocess_images(examples) + + original_images, edited_images = preprocessed_images.chunk(2) + original_images = original_images.reshape(-1, 3, args.resolution, args.resolution) + edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution) + + examples["original_pixel_values"] = original_images + 
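    # 编辑后的图像同样保存为像素张量,训练循环中会将其编码为潜变量作为目标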
examples["edited_pixel_values"] = edited_images + + captions = list(examples[edit_prompt_column]) + examples["input_ids"] = tokenize_captions(captions) + return examples +``` + +最后,在[训练循环](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L730)中,它首先将编辑后的图像编码到潜在空间: + +```py +latents = vae.encode(batch["edited_pixel_values"].to(weight_dtype)).latent_dist.sample() +latents = latents * vae.config.scaling_factor +``` + +然后,脚本对原始图像和编辑指令嵌入应用 dropout 以支持 CFG(Classifier-Free Guidance)。这使得模型能够调节编辑指令和原始图像对编辑后图像的影响。 + +```py +encoder_hidden_states = text_encoder(batch["input_ids"])[0] +original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode() + +if args.conditioning_dropout_prob is not None: + random_p = torch.rand(bsz, device=latents.device, generator=generator) + prompt_mask = random_p < 2 * args.conditioning_dropout_prob + prompt_mask = prompt_mask.reshape(bsz, 1, 1) + null_conditioning = text_encoder(tokenize_captions([""]).to(accelerator.device))[0] + encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states) + + image_mask_dtype = original_image_embeds.dtype + image_mask = 1 - ( + (random_p >= args.conditioning_dropout_prob).to(image_mask_dtype) + * (random_p < 3 * args.conditioning_dropout_prob).to(image_mask_dtype) + ) + image_mask = image_mask.reshape(bsz, 1, 1, 1) + original_image_embeds = image_mask * original_image_embeds +``` + +差不多就是这样了!除了这里描述的不同之处,脚本的其余部分与[文本到图像](text2image#training-script)训练脚本非常相似,所以请随意查看以获取更多细节。如果您想了解更多关于训练循环如何工作的信息,请查看[理解管道、模型和调度器](../using-diffusers/write_own_pipeline)教程,该教程分解了去噪过程的基本模式。 + +## 启动脚本 + +一旦您对脚本的更改感到满意,或者如果您对默认配置没问题,您 +准备好启动训练脚本!🚀 + +本指南使用 [fusing/instructpix2pix-1000-samples](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) 数据集,这是 [原始数据集](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) 的一个较小版本。您也可以创建并使用自己的数据集(请参阅 [创建用于训练的数据集](create_dataset) 指南)。 + +将 `MODEL_NAME` 环境变量设置为模型名称(可以是 Hub 上的模型 ID 或本地模型的路径),并将 `DATASET_ID` 设置为 Hub 上数据集的名称。脚本会创建并保存所有组件(特征提取器、调度器、文本编码器、UNet 等)到您的仓库中的一个子文件夹。 + + + +为了获得更好的结果,尝试使用更大的数据集进行更长时间的训练。我们只在较小规模的数据集上测试过此训练脚本。 + +
+ +要使用 Weights and Biases 监控训练进度,请将 `--report_to=wandb` 参数添加到训练命令中,并使用 `--val_image_url` 指定验证图像,使用 `--validation_prompt` 指定验证提示。这对于调试模型非常有用。 + +
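例如,可以在下方的训练命令中追加以下参数(这里的验证图像与提示沿用了本文末尾推理示例中的数据,仅作示意,可替换为您自己的验证数据):

```bash
--report_to=wandb
--val_image_url="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png"
--validation_prompt="add some ducks to the lake"
```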
+ +如果您在多个 GPU 上训练,请将 `--multi_gpu` 参数添加到 `accelerate launch` 命令中。 + +```bash +accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --dataset_name=$DATASET_ID \ + --enable_xformers_memory_efficient_attention \ + --resolution=256 \ + --random_flip \ + --train_batch_size=4 \ + --gradient_accumulation_steps=4 \ + --gradient_checkpointing \ + --max_train_steps=15000 \ + --checkpointing_steps=5000 \ + --checkpoints_total_limit=1 \ + --learning_rate=5e-05 \ + --max_grad_norm=1 \ + --lr_warmup_steps=0 \ + --conditioning_dropout_prob=0.05 \ + --mixed_precision=fp16 \ + --seed=42 \ + --push_to_hub +``` + +训练完成后,您可以使用您的新 InstructPix2Pix 进行推理: + +```py +import PIL +import requests +import torch +from diffusers import StableDiffusionInstructPix2PixPipeline +from diffusers.utils import load_image + +pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained("your_cool_model", torch_dtype=torch.float16).to("cuda") +generator = torch.Generator("cuda").manual_seed(0) + +image = load_image("https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png") +prompt = "add some ducks to the lake" +num_inference_steps = 20 +image_guidance_scale = 1.5 +guidance_scale = 10 + +edited_image = pipeline( + prompt, + image=image, + num_inference_steps=num_inference_steps, + image_guidance_scale=image_guidance_scale, + guidance_scale=guidance_scale, + generator=generator, +).images[0] +edited_image.save("edited_image.png") +``` + +您应该尝试不同的 `num_inference_steps`、`image_guidance_scale` 和 `guidance_scale` 值,以查看它们如何影响推理速度和质量。指导比例参数 +这些参数尤其重要,因为它们控制原始图像和编辑指令对编辑后图像的影响程度。 + +## Stable Diffusion XL + +Stable Diffusion XL (SDXL) 是一个强大的文本到图像模型,能够生成高分辨率图像,并在其架构中添加了第二个文本编码器。使用 [`train_instruct_pix2pix_sdxl.py`](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py) 脚本来训练 SDXL 模型以遵循图像编辑指令。 + +SDXL 训练脚本在 [SDXL 训练](sdxl) 指南中有更详细的讨论。 + +## 后续步骤 + +恭喜您训练了自己的 InstructPix2Pix 模型!🥳 要了解更多关于该模型的信息,可能有助于: + +- 阅读 [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd) 博客文章,了解更多我们使用 InstructPix2Pix 进行的一些实验、数据集准备以及不同指令的结果。 \ No newline at end of file diff --git a/docs/source/zh/training/kandinsky.md b/docs/source/zh/training/kandinsky.md new file mode 100644 index 0000000000..8da5c0c3a0 --- /dev/null +++ b/docs/source/zh/training/kandinsky.md @@ -0,0 +1,328 @@ + + +# Kandinsky 2.2 + + + +此脚本是实验性的,容易过拟合并遇到灾难性遗忘等问题。尝试探索不同的超参数以在您的数据集上获得最佳结果。 + + + +Kandinsky 2.2 是一个多语言文本到图像模型,能够生成更逼真的图像。该模型包括一个图像先验模型,用于从文本提示创建图像嵌入,以及一个解码器模型,基于先验模型的嵌入生成图像。这就是为什么在 Diffusers 中您会找到两个独立的脚本用于 Kandinsky 2.2,一个用于训练先验模型,另一个用于训练解码器模型。您可以分别训练这两个模型,但为了获得最佳结果,您应该同时训练先验和解码器模型。 + +根据您的 GPU,您可能需要启用 `gradient_checkpointing`(⚠️ 不支持先验模型!)、`mixed_precision` 和 `gradient_accumulation_steps` 来帮助将模型装入内存并加速训练。您可以通过启用 [xFormers](../optimization/xformers) 的内存高效注意力来进一步减少内存使用(版本 [v0.0.16](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212) 在某些 GPU 上训练时失败,因此您可能需要安装开发版本)。 + +本指南探讨了 [train_text_to_image_prior.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py) 和 [train_text_to_image_decoder.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py) 脚本,以帮助您更熟悉它,以及如何根据您的用例进行调整。 + +在运行脚本之前,请确保从源代码安装库: + +```bash +git clone https://github.com/huggingface/diffusers +cd diffusers +pip install . 
+``` + +然后导航到包含训练脚本的示例文件夹,并安装脚本所需的依赖项: + +```bash +cd examples/kandinsky2_2/text_to_image +pip install -r requirements.txt +``` + + + +🤗 Accelerate 是一个帮助您在多个 GPU/TPU 上或使用混合精度进行训练的库。它会根据您的硬件和环境自动配置训练设置。查看 🤗 Accelerate 的 [快速入门](https://huggingface.co/docs/accelerate/quicktour +) 了解更多。 + + + +初始化一个 🤗 Accelerate 环境: + +```bash +accelerate config +``` + +要设置一个默认的 🤗 Accelerate 环境而不选择任何配置: + +```bash +accelerate config default +``` + +或者,如果您的环境不支持交互式 shell,比如 notebook,您可以使用: + +```py +from accelerate.utils import write_basic_config + +write_basic_config() +``` + +最后,如果您想在自己的数据集上训练模型,请查看 [创建用于训练的数据集](create_dataset) 指南,了解如何创建与训练脚本兼容的数据集。 + + + +以下部分重点介绍了训练脚本中对于理解如何修改它很重要的部分,但并未详细涵盖脚本的每个方面。如果您有兴趣了解更多,请随时阅读脚本,并让我们知道您有任何疑问或顾虑。 + + + +## 脚本参数 + +训练脚本提供了许多参数来帮助您自定义训练运行。所有参数及其描述都可以在 [`parse_args()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L190) 函数中找到。训练脚本为每个参数提供了默认值,例如训练批次大小和学习率,但如果您愿意,也可以在训练命令中设置自己的值。 + +例如,要使用 fp16 格式的混合精度加速训练,请在训练命令中添加 `--mixed_precision` 参数: + +```bash +accelerate launch train_text_to_image_prior.py \ + --mixed_precision="fp16" +``` + +大多数参数与 [文本到图像](text2image#script-parameters) 训练指南中的参数相同,所以让我们直接进入 Kandinsky 训练脚本的 walkthrough! + +### Min-SNR 加权 + +[Min-SNR](https://huggingface.co/papers/2303.09556) 加权策略可以通过重新平衡损失来帮助训练,实现更快的收敛。训练脚本支持预测 `epsilon`(噪声)或 `v_prediction`,但 Min-SNR 与两种预测类型都兼容。此加权策略仅由 PyTorch 支持,在 Flax 训练脚本中不可用。 + +添加 `--snr_gamma` 参数并将其设置为推荐值 5.0: + +```bash +accelerate launch train_text_to_image_prior.py \ + --snr_gamma=5.0 +``` + +## 训练脚本 + +训练脚本也类似于 [文本到图像](text2image#training-script) 训练指南,但已修改以支持训练 prior 和 decoder 模型。本指南重点介绍 Kandinsky 2.2 训练脚本中独特的代码。 + + + + +[`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L441) 函数包含代码 f +或准备数据集和训练模型。 + +您会立即注意到的主要区别之一是,训练脚本除了调度器和分词器外,还加载了一个 [`~transformers.CLIPImageProcessor`] 用于预处理图像,以及一个 [`~transformers.CLIPVisionModelWithProjection`] 模型用于编码图像: + +```py +noise_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", prediction_type="sample") +image_processor = CLIPImageProcessor.from_pretrained( + args.pretrained_prior_model_name_or_path, subfolder="image_processor" +) +tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="tokenizer") + +with ContextManagers(deepspeed_zero_init_disabled_context_manager()): + image_encoder = CLIPVisionModelWithProjection.from_pretrained( + args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype + ).eval() + text_encoder = CLIPTextModelWithProjection.from_pretrained( + args.pretrained_prior_model_name_or_path, subfolder="text_encoder", torch_dtype=weight_dtype + ).eval() +``` + +Kandinsky 使用一个 [`PriorTransformer`] 来生成图像嵌入,因此您需要设置优化器来学习先验模型的参数。 + +```py +prior = PriorTransformer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="prior") +prior.train() +optimizer = optimizer_cls( + prior.parameters(), + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, +) +``` + +接下来,输入标题被分词,图像由 [`~transformers.CLIPImageProcessor`] [预处理](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L632): + +```py +def preprocess_train(examples): + images = [image.convert("RGB") for image in examples[image_column]] + 
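    # 使用 CLIPImageProcessor 生成图像编码器所需的 pixel values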
examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values + examples["text_input_ids"], examples["text_mask"] = tokenize_captions(examples) + return examples +``` + +最后,[训练循环](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L718) 将输入图像转换为潜在表示,向图像嵌入添加噪声,并进行预测: + +```py +model_pred = prior( + noisy_latents, + timestep=timesteps, + proj_embedding=prompt_embeds, + encoder_hidden_states=text_encoder_hidden_states, + attention_mask=text_mask, +).predicted_image_embedding +``` + +如果您想了解更多关于训练循环的工作原理,请查看 [理解管道、模型和调度器](../using-diffusers/write_own_pipeline) 教程,该教程分解了去噪过程的基本模式。 + + + + +The [`main()`](https://github.com/huggingface/di +ffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L440) 函数包含准备数据集和训练模型的代码。 + +与之前的模型不同,解码器初始化一个 [`VQModel`] 来将潜在变量解码为图像,并使用一个 [`UNet2DConditionModel`]: + +```py +with ContextManagers(deepspeed_zero_init_disabled_context_manager()): + vae = VQModel.from_pretrained( + args.pretrained_decoder_model_name_or_path, subfolder="movq", torch_dtype=weight_dtype + ).eval() + image_encoder = CLIPVisionModelWithProjection.from_pretrained( + args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype + ).eval() +unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet") +``` + +接下来,脚本包括几个图像变换和一个用于对图像应用变换并返回像素值的[预处理](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L622)函数: + +```py +def preprocess_train(examples): + images = [image.convert("RGB") for image in examples[image_column]] + examples["pixel_values"] = [train_transforms(image) for image in images] + examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values + return examples +``` + +最后,[训练循环](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L706)处理将图像转换为潜在变量、添加噪声和预测噪声残差。 + +如果您想了解更多关于训练循环如何工作的信息,请查看[理解管道、模型和调度器](../using-diffusers/write_own_pipeline)教程,该教程分解了去噪过程的基本模式。 + +```py +model_pred = unet(noisy_latents, timesteps, None, added_cond_kwargs=added_cond_kwargs).sample[:, :4] +``` + + + + +## 启动脚本 + +一旦您完成了所有更改或接受默认配置,就可以启动训练脚本了!🚀 + +您将在[Naruto BLIP 字幕](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions)数据集上进行训练,以生成您自己的Naruto角色,但您也可以通过遵循[创建用于训练的数据集](create_dataset)指南来创建和训练您自己的数据集。将环境变量 `DATASET_NAME` 设置为Hub上数据集的名称,或者如果您在自己的文件上训练,将环境变量 `TRAIN_DIR` 设置为数据集的路径。 + +如果您在多个GPU上训练,请在 `accelerate launch` 命令中添加 `--multi_gpu` 参数。 + + + +要使用Weights & Biases监控训练进度,请在训练命令中添加 `--report_to=wandb` 参数。您还需要 +建议在训练命令中添加 `--validation_prompt` 以跟踪结果。这对于调试模型和查看中间结果非常有用。 + + + + + + +```bash +export DATASET_NAME="lambdalabs/naruto-blip-captions" + +accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \ + --dataset_name=$DATASET_NAME \ + --resolution=768 \ + --train_batch_size=1 \ + --gradient_accumulation_steps=4 \ + --max_train_steps=15000 \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --checkpoints_total_limit=3 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --validation_prompts="A robot naruto, 4k photo" \ + --report_to="wandb" \ + --push_to_hub \ + --output_dir="kandi2-prior-naruto-model" +``` + + + + +```bash +export DATASET_NAME="lambdalabs/naruto-blip-captions" + 
+accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \ + --dataset_name=$DATASET_NAME \ + --resolution=768 \ + --train_batch_size=1 \ + --gradient_accumulation_steps=4 \ + --gradient_checkpointing \ + --max_train_steps=15000 \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --checkpoints_total_limit=3 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --validation_prompts="A robot naruto, 4k photo" \ + --report_to="wandb" \ + --push_to_hub \ + --output_dir="kandi2-decoder-naruto-model" +``` + + + + +训练完成后,您可以使用新训练的模型进行推理! + + + + +```py +from diffusers import AutoPipelineForText2Image, DiffusionPipeline +import torch + +prior_pipeline = DiffusionPipeline.from_pretrained(output_dir, torch_dtype=torch.float16) +prior_components = {"prior_" + k: v for k,v in prior_pipeline.components.items()} +pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", **prior_components, torch_dtype=torch.float16) + +pipe.enable_model_cpu_offload() +prompt="A robot naruto, 4k photo" +image = pipeline(prompt=prompt, negative_prompt=negative_prompt).images[0] +``` + + + +可以随意将 `kandinsky-community/kandinsky-2-2-decoder` 替换为您自己训练的 decoder 检查点! + + + + + + +```py +from diffusers import AutoPipelineForText2Image +import torch + +pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16) +pipeline.enable_model_cpu_offload() + +prompt="A robot naruto, 4k photo" +image = pipeline(prompt=prompt).images[0] +``` + +对于 decoder 模型,您还可以从保存的检查点进行推理,这对于查看中间结果很有用。在这种情况下,将检查点加载到 UNet 中: + +```py +from diffusers import AutoPipelineForText2Image, UNet2DConditionModel + +unet = UNet2DConditionModel.from_pretrained("path/to/saved/model" + "/checkpoint-/unet") + +pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", unet=unet, torch_dtype=torch.float16) +pipeline.enable_model_cpu_offload() + +image = pipeline(prompt="A robot naruto, 4k photo").images[0] +``` + + + + +## 后续步骤 + +恭喜您训练了一个 Kandinsky 2.2 模型!要了解更多关于如何使用您的新模型的信息,以下指南可能会有所帮助: + +- 阅读 [Kandinsky](../using-diffusers/kandinsky) 指南,学习如何将其用于各种不同的任务(文本到图像、图像到图像、修复、插值),以及如何与 ControlNet 结合使用。 +- 查看 [DreamBooth](dreambooth) 和 [LoRA](lora) 训练指南,学习如何使用少量示例图像训练个性化的 Kandinsky 模型。这两种训练技术甚至可以结合使用! \ No newline at end of file diff --git a/docs/source/zh/training/wuerstchen.md b/docs/source/zh/training/wuerstchen.md new file mode 100644 index 0000000000..8a6abe6624 --- /dev/null +++ b/docs/source/zh/training/wuerstchen.md @@ -0,0 +1,191 @@ + + +# Wuerstchen + +[Wuerstchen](https://hf.co/papers/2306.00637) 模型通过将潜在空间压缩 42 倍,在不影响图像质量的情况下大幅降低计算成本并加速推理。在训练过程中,Wuerstchen 使用两个模型(VQGAN + 自动编码器)来压缩潜在表示,然后第三个模型(文本条件潜在扩散模型)在这个高度压缩的空间上进行条件化以生成图像。 + +为了将先验模型放入 GPU 内存并加速训练,尝试分别启用 `gradient_accumulation_steps`、`gradient_checkpointing` 和 `mixed_precision`。 + +本指南探讨 [train_text_to_image_prior.py](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/train_text_to_image_prior.py) 脚本,帮助您更熟悉它,以及如何根据您的用例进行适配。 + +在运行脚本之前,请确保从源代码安装库: + +```bash +git clone https://github.com/huggingface/diffusers +cd diffusers +pip install . 
+``` + +然后导航到包含训练脚本的示例文件夹,并安装脚本所需的依赖项: + +```bash +cd examples/wuerstchen/text_to_image +pip install -r requirements.txt +``` + + + +🤗 Accelerate 是一个帮助您在多个 GPU/TPU 上或使用混合精度进行训练的库。它会根据您的硬件和环境自动配置训练设置。查看 🤗 Accelerate [快速入门](https://huggingface.co/docs/accelerate/quicktour) 以了解更多信息。 + + + +初始化一个 🤗 Accelerate 环境: + +```bash +accelerate config +``` + +要设置一个默认的 🤗 Accelerate 环境而不选择任何配置: + +```bash +accelerate config default +``` + +或者,如果您的环境不支持交互式 shell,例如笔记本,您可以使用: + +```py +from accelerate.utils import write_basic_config + +write_basic_config() +``` + +最后,如果您想在自己的数据集上训练模型,请查看 [创建训练数据集](create_dataset) 指南,了解如何创建与训练脚本兼容的数据集。 + + + +以下部分重点介绍了训练脚本中对于理解如何修改它很重要的部分,但并未涵盖 [脚本](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/train_text_to_image_prior.py) 的详细信息。如果您有兴趣了解更多,请随时阅读脚本,并告诉我们您是否有任何问题或疑虑。 + + + +## 脚本参数 + +训练脚本提供了许多参数来帮助您自定义训练运行。所有参数及其描述都可以在 [`parse_args()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L192) 函数中找到。它为每个参数提供了默认值,例如训练批次大小和学习率,但如果您愿意,也可以在训练命令中设置自己的值。 + +例如,要使用 fp16 格式的混合精度加速训练,请在训练命令中添加 `--mixed_precision` 参数: + +```bash +accelerate launch train_text_to_image_prior.py \ + --mixed_precision="fp16" +``` + +大多数参数与 [文本到图像](text2image#script-parameters) 训练指南中的参数相同,因此让我们直接深入 Wuerstchen 训练脚本! + +## 训练脚本 + +训练脚本也与 [文本到图像](text2image#training-script) 训练指南类似,但已修改以支持 Wuerstchen。本指南重点介绍 Wuerstchen 训练脚本中独特的代码。 + +[`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L441) 函数首先初始化图像编码器 - 一个 [EfficientNet](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/modeling_efficient_net_encoder.py) - 以及通常的调度器和分词器。 + +```py +with ContextManagers(deepspeed_zero_init_disabled_context_manager()): + pretrained_checkpoint_file = hf_hub_download("dome272/wuerstchen", filename="model_v2_stage_b.pt") + state_dict = torch.load(pretrained_checkpoint_file, map_location="cpu") + image_encoder = EfficientNetEncoder() + image_encoder.load_state_dict(state_dict["effnet_state_dict"]) + image_encoder.eval() +``` + +您还将加载 [`WuerstchenPrior`] 模型以进行优化。 + +```py +prior = WuerstchenPrior.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="prior") + +optimizer = optimizer_cls( + prior.parameters(), + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, +) +``` + +接下来,您将对图像应用一些 [transforms](https://github.com/huggingface/diffusers/blob/65ef7a0c5c594b4f84092e328fbdd73183613b30/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L656) 并对标题进行 [tokenize](https://github.com/huggingface/diffusers/blob/65ef7a0c5c594b4f84092e328fbdd73183613b30/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L637): + +```py +def preprocess_train(examples): + images = [image.conver +t("RGB") for image in examples[image_column]] + examples["effnet_pixel_values"] = [effnet_transforms(image) for image in images] + examples["text_input_ids"], examples["text_mask"] = tokenize_captions(examples) + return examples +``` + +最后,[训练循环](https://github.com/huggingface/diffusers/blob/65ef7a0c5c594b4f84092e328fbdd73183613b30/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L656)处理使用`EfficientNetEncoder`将图像压缩到潜在空间,向潜在表示添加噪声,并使用[`WuerstchenPrior`]模型预测噪声残差。 + +```py +pred_noise = prior(noisy_latents, timesteps, prompt_embeds) +``` + 
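+在实际脚本中,预测得到的噪声会与真实噪声比较来计算训练损失。下面是一个简化的示意(并非脚本原文;`noise`、`accelerator`、`optimizer`、`lr_scheduler` 均假定已在训练循环中定义),仅用于说明这一步之后的常见写法:
+
+```py
+import torch.nn.functional as F
+
+# 示意:以预测噪声与真实噪声之间的均方误差作为训练目标
+loss = F.mse_loss(pred_noise.float(), noise.float(), reduction="mean")
+
+# 示意:反向传播并更新先验模型参数
+accelerator.backward(loss)
+optimizer.step()
+lr_scheduler.step()
+optimizer.zero_grad()
+```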
+如果您想了解更多关于训练循环的工作原理,请查看[理解管道、模型和调度器](../using-diffusers/write_own_pipeline)教程,该教程分解了去噪过程的基本模式。 + +## 启动脚本 + +一旦您完成了所有更改或对默认配置满意,就可以启动训练脚本了!🚀 + +设置`DATASET_NAME`环境变量为Hub中的数据集名称。本指南使用[Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions)数据集,但您也可以创建和训练自己的数据集(参见[创建用于训练的数据集](create_dataset)指南)。 + + + +要使用Weights & Biases监控训练进度,请在训练命令中添加`--report_to=wandb`参数。您还需要在训练命令中添加`--validation_prompt`以跟踪结果。这对于调试模型和查看中间结果非常有用。 + + + +```bash +export DATASET_NAME="lambdalabs/naruto-blip-captions" + +accelerate launch train_text_to_image_prior.py \ + --mixed_precision="fp16" \ + --dataset_name=$DATASET_NAME \ + --resolution=768 \ + --train_batch_size=4 \ + --gradient_accumulation_steps=4 \ + --gradient_checkpointing \ + --dataloader_num_workers=4 \ + --max_train_steps=15000 \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --checkpoints_total_limit=3 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --validation_prompts="A robot naruto, 4k photo" \ + --report_to="wandb" \ + --push_to_hub \ + --output_dir="wuerstchen-prior-naruto-model" +``` + +训练完成后,您可以使用新训练的模型进行推理! + +```py +import torch +from diffusers import AutoPipelineForText2Image +from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS + +pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16).to("cuda") + +caption = "A cute bird naruto holding a shield" +images = pipeline( + caption, + width=1024, + height=1536, + prior_timesteps=DEFAULT_STAGE_C_TIMESTEPS, + prior_guidance_scale=4.0, + num_images_per_prompt=2, +).images +``` + +## 下一步 + +恭喜您训练了一个Wuerstchen模型!要了解更多关于如何使用您的新模型的信息,请参 +以下内容可能有所帮助: + +- 查看 [Wuerstchen](../api/pipelines/wuerstchen#text-to-image-generation) API 文档,了解更多关于如何使用该管道进行文本到图像生成及其限制的信息。 \ No newline at end of file From bb1d9a8b7523819b1846053616ddfecc3b857f6b Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Thu, 21 Aug 2025 09:45:04 -0700 Subject: [PATCH 04/74] [docs] Optimized code snippets (#12200) add space --- docs/source/en/quicktour.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/source/en/quicktour.md b/docs/source/en/quicktour.md index 5d4b9012c0..1ccc8eeadc 100644 --- a/docs/source/en/quicktour.md +++ b/docs/source/en/quicktour.md @@ -162,6 +162,9 @@ Take a look at the [Quantization](./quantization/overview) section for more deta ## Optimizations +> [!TIP] +> Optimization is dependent on hardware specs such as memory. Use this [Space](https://huggingface.co/spaces/diffusers/optimized-diffusers-code) to generate code examples that include all of Diffusers' available memory and speed optimization techniques for any model you're using. + Modern diffusion models are very large and have billions of parameters. The iterative denoising process is also computationally intensive and slow. Diffusers provides techniques for reducing memory usage and boosting inference speed. These techniques can be combined with quantization to optimize for both memory usage and inference speed. 
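+
+As a minimal sketch of how these pieces fit together (the model and quantization backend below are only examples, adjust them for your hardware), a quantized checkpoint can be combined with CPU offloading.
+
+```py
+import torch
+from diffusers import DiffusionPipeline
+from diffusers.quantizers import PipelineQuantizationConfig
+
+# quantize the largest component to reduce the memory footprint
+quant_config = PipelineQuantizationConfig(
+    quant_backend="bitsandbytes_4bit",
+    quant_kwargs={"load_in_4bit": True, "bnb_4bit_compute_dtype": torch.bfloat16},
+    components_to_quantize=["transformer"],
+)
+pipeline = DiffusionPipeline.from_pretrained(
+    "black-forest-labs/FLUX.1-dev",
+    quantization_config=quant_config,
+    torch_dtype=torch.bfloat16,
+)
+# move model components to the CPU when they aren't in use (see Memory usage below)
+pipeline.enable_model_cpu_offload()
+```
+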
### Memory usage From e62804ffbdf70ecc437321d6895f53880e5810a7 Mon Sep 17 00:00:00 2001 From: Yao Matrix Date: Thu, 21 Aug 2025 20:30:32 -0700 Subject: [PATCH 05/74] enable bria integration test on xpu, passed (#12214) Signed-off-by: YAO Matrix --- tests/pipelines/bria/test_pipeline_bria.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/pipelines/bria/test_pipeline_bria.py b/tests/pipelines/bria/test_pipeline_bria.py index e6dec4ddc0..b290160a65 100644 --- a/tests/pipelines/bria/test_pipeline_bria.py +++ b/tests/pipelines/bria/test_pipeline_bria.py @@ -28,10 +28,10 @@ from diffusers import ( ) from diffusers.pipelines.bria import BriaPipeline from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, - require_accelerator, - require_torch_gpu, + require_torch_accelerator, slow, torch_device, ) @@ -149,7 +149,7 @@ class BriaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): assert (output_height, output_width) == (expected_height, expected_width) @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") - @require_accelerator + @require_torch_accelerator def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): @@ -237,7 +237,7 @@ class BriaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): @slow -@require_torch_gpu +@require_torch_accelerator class BriaPipelineSlowTests(unittest.TestCase): pipeline_class = BriaPipeline repo_id = "briaai/BRIA-3.2" @@ -245,12 +245,12 @@ class BriaPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def get_inputs(self, device, seed=0): generator = torch.Generator(device="cpu").manual_seed(seed) From d03240801f2ac2b4d1f49584c1c5628b98583f6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C6=B0=C6=A1ng=20=C4=90=C3=ACnh=20Minh?= <119489204+vuongminh1907@users.noreply.github.com> Date: Fri, 22 Aug 2025 14:04:28 +0700 Subject: [PATCH 06/74] [Docs] Add documentation for KontextInpaintingPipeline (#12197) * [Docs] Add documentation for KontextInpaintingPipeline * Update docs/source/en/api/pipelines/flux.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * update kontext inpaint docs with hfoption * Update docs/source/en/api/pipelines/flux.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/api/pipelines/flux.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- docs/source/en/api/pipelines/flux.md | 73 ++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/docs/source/en/api/pipelines/flux.md b/docs/source/en/api/pipelines/flux.md index 64341ca4b9..bb72758222 100644 --- a/docs/source/en/api/pipelines/flux.md +++ b/docs/source/en/api/pipelines/flux.md @@ -316,6 +316,67 @@ if integrity_checker.test_image(image_): raise ValueError("Your image has been flagged. Choose another prompt/image or try again.") ``` +### Kontext Inpainting +`FluxKontextInpaintPipeline` enables image modification within a fixed mask region. It currently supports both text-based conditioning and image-reference conditioning. 
+ + + + +```python +import torch +from diffusers import FluxKontextInpaintPipeline +from diffusers.utils import load_image + +prompt = "Change the yellow dinosaur to green one" +img_url = ( + "https://github.com/ZenAI-Vietnam/Flux-Kontext-pipelines/blob/main/assets/dinosaur_input.jpeg?raw=true" +) +mask_url = ( + "https://github.com/ZenAI-Vietnam/Flux-Kontext-pipelines/blob/main/assets/dinosaur_mask.png?raw=true" +) + +source = load_image(img_url) +mask = load_image(mask_url) + +pipe = FluxKontextInpaintPipeline.from_pretrained( + "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16 +) +pipe.to("cuda") + +image = pipe(prompt=prompt, image=source, mask_image=mask, strength=1.0).images[0] +image.save("kontext_inpainting_normal.png") +``` + + + +```python +import torch +from diffusers import FluxKontextInpaintPipeline +from diffusers.utils import load_image + +pipe = FluxKontextInpaintPipeline.from_pretrained( + "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16 +) +pipe.to("cuda") + +prompt = "Replace this ball" +img_url = "https://images.pexels.com/photos/39362/the-ball-stadion-football-the-pitch-39362.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500" +mask_url = "https://github.com/ZenAI-Vietnam/Flux-Kontext-pipelines/blob/main/assets/ball_mask.png?raw=true" +image_reference_url = "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTah3x6OL_ECMBaZ5ZlJJhNsyC-OSMLWAI-xw&s" + +source = load_image(img_url) +mask = load_image(mask_url) +image_reference = load_image(image_reference_url) + +mask = pipe.mask_processor.blur(mask, blur_factor=12) +image = pipe( + prompt=prompt, image=source, mask_image=mask, image_reference=image_reference, strength=1.0 +).images[0] +image.save("kontext_inpainting_ref.png") +``` + + + ## Combining Flux Turbo LoRAs with Flux Control, Fill, and Redux We can combine Flux Turbo LoRAs with Flux Control and other pipelines like Fill and Redux to enable few-steps' inference. The example below shows how to do that for Flux Control LoRA for depth and turbo LoRA from [`ByteDance/Hyper-SD`](https://hf.co/ByteDance/Hyper-SD). 
@@ -646,3 +707,15 @@ image.save("flux-fp8-dev.png") [[autodoc]] FluxFillPipeline - all - __call__ + +## FluxKontextPipeline + +[[autodoc]] FluxKontextPipeline + - all + - __call__ + +## FluxKontextInpaintPipeline + +[[autodoc]] FluxKontextInpaintPipeline + - all + - __call__ \ No newline at end of file From 3e73dc24a45ecb6309813b47b9e2aaeaade586d1 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Fri, 22 Aug 2025 10:42:13 -0700 Subject: [PATCH 07/74] [docs] Community pipelines (#12201) * refresh * feedback --- docs/source/en/_toctree.yml | 2 +- .../custom_pipeline_overview.md | 397 ++++-------------- 2 files changed, 93 insertions(+), 306 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index dd0193a3a8..42558b636c 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -17,7 +17,7 @@ - local: tutorials/autopipeline title: AutoPipeline - local: using-diffusers/custom_pipeline_overview - title: Load community pipelines and components + title: Community pipelines and components - local: using-diffusers/callback title: Pipeline callbacks - local: using-diffusers/reusing_seeds diff --git a/docs/source/en/using-diffusers/custom_pipeline_overview.md b/docs/source/en/using-diffusers/custom_pipeline_overview.md index bfe48d28be..b087e57056 100644 --- a/docs/source/en/using-diffusers/custom_pipeline_overview.md +++ b/docs/source/en/using-diffusers/custom_pipeline_overview.md @@ -10,376 +10,163 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Load community pipelines and components - [[open-in-colab]] -## Community pipelines +# Community pipelines and components -> [!TIP] Take a look at GitHub Issue [#841](https://github.com/huggingface/diffusers/issues/841) for more context about why we're adding community pipelines to help everyone easily share their work without being slowed down. - -Community pipelines are any [`DiffusionPipeline`] class that are different from the original paper implementation (for example, the [`StableDiffusionControlNetPipeline`] corresponds to the [Text-to-Image Generation with ControlNet Conditioning](https://huggingface.co/papers/2302.05543) paper). They provide additional functionality or extend the original implementation of a pipeline. - -There are many cool community pipelines like [Marigold Depth Estimation](https://github.com/huggingface/diffusers/tree/main/examples/community#marigold-depth-estimation) or [InstantID](https://github.com/huggingface/diffusers/tree/main/examples/community#instantid-pipeline), and you can find all the official community pipelines [here](https://github.com/huggingface/diffusers/tree/main/examples/community). - -There are two types of community pipelines, those stored on the Hugging Face Hub and those stored on Diffusers GitHub repository. Hub pipelines are completely customizable (scheduler, models, pipeline code, etc.) while Diffusers GitHub pipelines are only limited to custom pipeline code. 
- -| | GitHub community pipeline | HF Hub community pipeline | -|----------------|------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| -| usage | same | same | -| review process | open a Pull Request on GitHub and undergo a review process from the Diffusers team before merging; may be slower | upload directly to a Hub repository without any review; this is the fastest workflow | -| visibility | included in the official Diffusers repository and documentation | included on your HF Hub profile and relies on your own usage/promotion to gain visibility | - - - - -To load a Hugging Face Hub community pipeline, pass the repository id of the community pipeline to the `custom_pipeline` argument and the model repository where you'd like to load the pipeline weights and components from. For example, the example below loads a dummy pipeline from [hf-internal-testing/diffusers-dummy-pipeline](https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py) and the pipeline weights and components from [google/ddpm-cifar10-32](https://huggingface.co/google/ddpm-cifar10-32): - -> [!WARNING] -> By loading a community pipeline from the Hugging Face Hub, you are trusting that the code you are loading is safe. Make sure to inspect the code online before loading and running it automatically! - -```py -from diffusers import DiffusionPipeline - -pipeline = DiffusionPipeline.from_pretrained( - "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline", use_safetensors=True -) -``` - - - - -To load a GitHub community pipeline, pass the repository id of the community pipeline to the `custom_pipeline` argument and the model repository where you you'd like to load the pipeline weights and components from. You can also load model components directly. The example below loads the community [CLIP Guided Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#clip-guided-stable-diffusion) pipeline and the CLIP model components. - -```py -from diffusers import DiffusionPipeline -from transformers import CLIPImageProcessor, CLIPModel - -clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K" - -feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id) -clip_model = CLIPModel.from_pretrained(clip_model_id) - -pipeline = DiffusionPipeline.from_pretrained( - "stable-diffusion-v1-5/stable-diffusion-v1-5", - custom_pipeline="clip_guided_stable_diffusion", - clip_model=clip_model, - feature_extractor=feature_extractor, - use_safetensors=True, -) -``` - - - - -### Load from a local file - -Community pipelines can also be loaded from a local file if you pass a file path instead. The path to the passed directory must contain a pipeline.py file that contains the pipeline class. - -```py -pipeline = DiffusionPipeline.from_pretrained( - "stable-diffusion-v1-5/stable-diffusion-v1-5", - custom_pipeline="./path/to/pipeline_directory/", - clip_model=clip_model, - feature_extractor=feature_extractor, - use_safetensors=True, -) -``` - -### Load from a specific version - -By default, community pipelines are loaded from the latest stable version of Diffusers. To load a community pipeline from another version, use the `custom_revision` parameter. 
- - - - -For example, to load from the main branch: - -```py -pipeline = DiffusionPipeline.from_pretrained( - "stable-diffusion-v1-5/stable-diffusion-v1-5", - custom_pipeline="clip_guided_stable_diffusion", - custom_revision="main", - clip_model=clip_model, - feature_extractor=feature_extractor, - use_safetensors=True, -) -``` - - - - -For example, to load from a previous version of Diffusers like v0.25.0: - -```py -pipeline = DiffusionPipeline.from_pretrained( - "stable-diffusion-v1-5/stable-diffusion-v1-5", - custom_pipeline="clip_guided_stable_diffusion", - custom_revision="v0.25.0", - clip_model=clip_model, - feature_extractor=feature_extractor, - use_safetensors=True, -) -``` - - - - -### Load with from_pipe - -Community pipelines can also be loaded with the [`~DiffusionPipeline.from_pipe`] method which allows you to load and reuse multiple pipelines without any additional memory overhead (learn more in the [Reuse a pipeline](./loading#reuse-a-pipeline) guide). The memory requirement is determined by the largest single pipeline loaded. - -For example, let's load a community pipeline that supports [long prompts with weighting](https://github.com/huggingface/diffusers/tree/main/examples/community#long-prompt-weighting-stable-diffusion) from a Stable Diffusion pipeline. - -```py -import torch -from diffusers import DiffusionPipeline - -pipe_sd = DiffusionPipeline.from_pretrained("emilianJR/CyberRealistic_V3", torch_dtype=torch.float16) -pipe_sd.to("cuda") -# load long prompt weighting pipeline -pipe_lpw = DiffusionPipeline.from_pipe( - pipe_sd, - custom_pipeline="lpw_stable_diffusion", -).to("cuda") - -prompt = "cat, hiding in the leaves, ((rain)), zazie rainyday, beautiful eyes, macro shot, colorful details, natural lighting, amazing composition, subsurface scattering, amazing textures, filmic, soft light, ultra-detailed eyes, intricate details, detailed texture, light source contrast, dramatic shadows, cinematic light, depth of field, film grain, noise, dark background, hyperrealistic dslr film still, dim volumetric cinematic lighting" -neg_prompt = "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, mutated hands and fingers:1.4), (deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation" -generator = torch.Generator(device="cpu").manual_seed(20) -out_lpw = pipe_lpw( - prompt, - negative_prompt=neg_prompt, - width=512, - height=512, - max_embeddings_multiples=3, - num_inference_steps=50, - generator=generator, - ).images[0] -out_lpw -``` - -
-
- -
Stable Diffusion with long prompt weighting
-
-
- -
Stable Diffusion
-
-
- -## Example community pipelines - -Community pipelines are a really fun and creative way to extend the capabilities of the original pipeline with new and unique features. You can find all community pipelines in the [diffusers/examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) folder with inference and training examples for how to use them. - -This section showcases a couple of the community pipelines and hopefully it'll inspire you to create your own (feel free to open a PR for your community pipeline and ping us for a review)! +Community pipelines are [`DiffusionPipeline`] classes that are different from the original paper implementation. They provide additional functionality or extend the original pipeline implementation. > [!TIP] -> The [`~DiffusionPipeline.from_pipe`] method is particularly useful for loading community pipelines because many of them don't have pretrained weights and add a feature on top of an existing pipeline like Stable Diffusion or Stable Diffusion XL. You can learn more about the [`~DiffusionPipeline.from_pipe`] method in the [Load with from_pipe](custom_pipeline_overview#load-with-from_pipe) section. +> Check out the community pipelines in [diffusers/examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) with inference and training examples for how to use them. - - +Community pipelines are either stored on the Hub or the Diffusers' GitHub repository. Hub pipelines are completely customizable (scheduler, models, pipeline code, etc.) while GitHub pipelines are limited to only the custom pipeline code. Further compare the two community pipeline types in the table below. -[Marigold](https://marigoldmonodepth.github.io/) is a depth estimation diffusion pipeline that uses the rich existing and inherent visual knowledge in diffusion models. It takes an input image and denoises and decodes it into a depth map. Marigold performs well even on images it hasn't seen before. +| | GitHub | Hub | +|---|---|---| +| Usage | Same. | Same. | +| Review process | Open a Pull Request on GitHub and undergo a review process from the Diffusers team before merging. This option is slower. | Upload directly to a Hub repository without a review. This is the fastest option. | +| Visibility | Included in the official Diffusers repository and docs. | Included on your Hub profile and relies on your own usage and promotion to gain visibility. | + +## custom_pipeline + +Load either community pipeline types by passing the `custom_pipeline` argument to [`~DiffusionPipeline.from_pretrained`]. ```py import torch -from PIL import Image from diffusers import DiffusionPipeline -from diffusers.utils import load_image pipeline = DiffusionPipeline.from_pretrained( - "prs-eth/marigold-lcm-v1-0", - custom_pipeline="marigold_depth_estimation", + "stabilityai/stable-diffusion-3-medium-diffusers", + custom_pipeline="pipeline_stable_diffusion_3_instruct_pix2pix", torch_dtype=torch.float16, - variant="fp16", + device_map="cuda" ) - -pipeline.to("cuda") -image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/community-marigold.png") -output = pipeline( - image, - denoising_steps=4, - ensemble_size=5, - processing_res=768, - match_input_res=True, - batch_size=0, - seed=33, - color_map="Spectral", - show_progress_bar=True, -) -depth_colored: Image.Image = output.depth_colored -depth_colored.save("./depth_colored.png") ``` -
-
- -
original image
-
-
- -
colorized depth image
-
-
- -
- - -[HD-Painter](https://hf.co/papers/2312.14091) is a high-resolution inpainting pipeline. It introduces a *Prompt-Aware Introverted Attention (PAIntA)* layer to better align a prompt with the area to be inpainted, and *Reweighting Attention Score Guidance (RASG)* to keep the latents more prompt-aligned and within their trained domain to generate realistc images. +Add the `custom_revision` argument to [`~DiffusionPipeline.from_pretrained`] to load a community pipeline from a specific version (for example, `v0.30.0` or `main`). By default, community pipelines are loaded from the latest stable version of Diffusers. ```py import torch -from diffusers import DiffusionPipeline, DDIMScheduler -from diffusers.utils import load_image +from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( - "stable-diffusion-v1-5/stable-diffusion-v1-5-inpainting", - custom_pipeline="hd_painter" + "stabilityai/stable-diffusion-3-medium-diffusers", + custom_pipeline="pipeline_stable_diffusion_3_instruct_pix2pix", + custom_revision="main" + torch_dtype=torch.float16, + device_map="cuda" ) -pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hd-painter.jpg") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hd-painter-mask.png") -prompt = "football" -image = pipeline(prompt, init_image, mask_image, use_rasg=True, use_painta=True, generator=torch.manual_seed(0)).images[0] -image ``` -
-
- -
original image
-
-
- -
generated image
-
-
+> [!WARNING] +> While the Hugging Face Hub [scans](https://huggingface.co/docs/hub/security-malware) files, you should still inspect the Hub pipeline code and make sure it is safe. -
-
+There are a few ways to load a community pipeline. + +- Pass a path to `custom_pipeline` to load a local community pipeline. The directory must contain a `pipeline.py` file containing the pipeline class. + + ```py + import torch + from diffusers import DiffusionPipeline + + pipeline = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-3-medium-diffusers", + custom_pipeline="path/to/pipeline_directory", + torch_dtype=torch.float16, + device_map="cuda" + ) + ``` + +- The `custom_pipeline` argument is also supported by [`~DiffusionPipeline.from_pipe`], which is useful for [reusing pipelines](./loading#reuse-a-pipeline) without using additional memory. It limits the memory usage to only the largest pipeline loaded. + + ```py + import torch + from diffusers import DiffusionPipeline + + pipeline_sd = DiffusionPipeline.from_pretrained("emilianJR/CyberRealistic_V3", torch_dtype=torch.float16, device_map="cuda") + pipeline_lpw = DiffusionPipeline.from_pipe( + pipeline_sd, custom_pipeline="lpw_stable_diffusion", device_map="cuda" + ) + ``` + + The [`~DiffusionPipeline.from_pipe`] method is especially useful for loading community pipelines because many of them don't have pretrained weights. Community pipelines generally add a feature on top of an existing pipeline. ## Community components -Community components allow users to build pipelines that may have customized components that are not a part of Diffusers. If your pipeline has custom components that Diffusers doesn't already support, you need to provide their implementations as Python modules. These customized components could be a VAE, UNet, and scheduler. In most cases, the text encoder is imported from the Transformers library. The pipeline code itself can also be customized. +Community components let users build pipelines with custom transformers, UNets, VAEs, and schedulers not supported by Diffusers. These components require Python module implementations. -This section shows how users should use community components to build a community pipeline. +This section shows how users can use community components to build a community pipeline using [showlab/show-1-base](https://huggingface.co/showlab/show-1-base) as an example. -You'll use the [showlab/show-1-base](https://huggingface.co/showlab/show-1-base) pipeline checkpoint as an example. - -1. Import and load the text encoder from Transformers: - -```python -from transformers import T5Tokenizer, T5EncoderModel - -pipe_id = "showlab/show-1-base" -tokenizer = T5Tokenizer.from_pretrained(pipe_id, subfolder="tokenizer") -text_encoder = T5EncoderModel.from_pretrained(pipe_id, subfolder="text_encoder") -``` - -2. Load a scheduler: +1. Load the required components, the scheduler and image processor. The text encoder is generally imported from [Transformers](https://huggingface.co/docs/transformers/index). ```python +from transformers import T5Tokenizer, T5EncoderModel, CLIPImageProcessor from diffusers import DPMSolverMultistepScheduler +pipeline_id = "showlab/show-1-base" +tokenizer = T5Tokenizer.from_pretrained(pipeline_id, subfolder="tokenizer") +text_encoder = T5EncoderModel.from_pretrained(pipeline_id, subfolder="text_encoder") scheduler = DPMSolverMultistepScheduler.from_pretrained(pipe_id, subfolder="scheduler") -``` - -3. 
Load an image processor: - -```python -from transformers import CLIPImageProcessor - feature_extractor = CLIPImageProcessor.from_pretrained(pipe_id, subfolder="feature_extractor") ``` - +> [!WARNING] +> In steps 2 and 3, the custom [UNet](https://github.com/showlab/Show-1/blob/main/showone/models/unet_3d_condition.py) and [pipeline](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py) implementation must match the format shown in their files for this example to work. -In steps 4 and 5, the custom [UNet](https://github.com/showlab/Show-1/blob/main/showone/models/unet_3d_condition.py) and [pipeline](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py) implementation must match the format shown in their files for this example to work. - - - -4. Now you'll load a [custom UNet](https://github.com/showlab/Show-1/blob/main/showone/models/unet_3d_condition.py), which in this example, has already been implemented in [showone_unet_3d_condition.py](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py) for your convenience. You'll notice the [`UNet3DConditionModel`] class name is changed to `ShowOneUNet3DConditionModel` because [`UNet3DConditionModel`] already exists in Diffusers. Any components needed for the `ShowOneUNet3DConditionModel` class should be placed in showone_unet_3d_condition.py. - - Once this is done, you can initialize the UNet: - - ```python - from showone_unet_3d_condition import ShowOneUNet3DConditionModel - - unet = ShowOneUNet3DConditionModel.from_pretrained(pipe_id, subfolder="unet") - ``` - -5. Finally, you'll load the custom pipeline code. For this example, it has already been created for you in [pipeline_t2v_base_pixel.py](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/pipeline_t2v_base_pixel.py). This script contains a custom `TextToVideoIFPipeline` class for generating videos from text. Just like the custom UNet, any code needed for the custom pipeline to work should go in pipeline_t2v_base_pixel.py. - -Once everything is in place, you can initialize the `TextToVideoIFPipeline` with the `ShowOneUNet3DConditionModel`: +2. Load a [custom UNet](https://github.com/showlab/Show-1/blob/main/showone/models/unet_3d_condition.py) which is already implemented in [showone_unet_3d_condition.py](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py). The [`UNet3DConditionModel`] class name is renamed to the custom implementation, `ShowOneUNet3DConditionModel`, because [`UNet3DConditionModel`] already exists in Diffusers. Any components required for `ShowOneUNet3DConditionModel` class should be placed in `showone_unet_3d_condition.py`. + +```python +from showone_unet_3d_condition import ShowOneUNet3DConditionModel + +unet = ShowOneUNet3DConditionModel.from_pretrained(pipeline_id, subfolder="unet") +``` + +3. Load the custom pipeline code (already implemented in [pipeline_t2v_base_pixel.py](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/pipeline_t2v_base_pixel.py)). This script contains a custom `TextToVideoIFPipeline` class for generating videos from text. Like the custom UNet, any code required for `TextToVideIFPipeline` should be placed in `pipeline_t2v_base_pixel.py`. + +Initialize `TextToVideoIFPipeline` with `ShowOneUNet3DConditionModel`. 
```python -from pipeline_t2v_base_pixel import TextToVideoIFPipeline import torch +from pipeline_t2v_base_pixel import TextToVideoIFPipeline pipeline = TextToVideoIFPipeline( unet=unet, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, - feature_extractor=feature_extractor + feature_extractor=feature_extractor, + device_map="cuda", + torch_dtype=torch.float16 ) -pipeline = pipeline.to(device="cuda") -pipeline.torch_dtype = torch.float16 ``` -Push the pipeline to the Hub to share with the community! +4. Push the pipeline to the Hub to share with the community. ```python pipeline.push_to_hub("custom-t2v-pipeline") ``` -After the pipeline is successfully pushed, you need to make a few changes: +After the pipeline is successfully pushed, make the following changes. -1. Change the `_class_name` attribute in [model_index.json](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/model_index.json#L2) to `"pipeline_t2v_base_pixel"` and `"TextToVideoIFPipeline"`. -2. Upload `showone_unet_3d_condition.py` to the [unet](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py) subfolder. -3. Upload `pipeline_t2v_base_pixel.py` to the pipeline [repository](https://huggingface.co/sayakpaul/show-1-base-with-code/tree/main). +- Change the `_class_name` attribute in [model_index.json](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/model_index.json#L2) to `"pipeline_t2v_base_pixel"` and `"TextToVideoIFPipeline"`. +- Upload `showone_unet_3d_condition.py` to the [unet](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py) subfolder. +- Upload `pipeline_t2v_base_pixel.py` to the pipeline [repository](https://huggingface.co/sayakpaul/show-1-base-with-code/tree/main). To run inference, add the `trust_remote_code` argument while initializing the pipeline to handle all the "magic" behind the scenes. -> [!WARNING] -> As an additional precaution with `trust_remote_code=True`, we strongly encourage you to pass a commit hash to the `revision` parameter in [`~DiffusionPipeline.from_pretrained`] to make sure the code hasn't been updated with some malicious new lines of code (unless you fully trust the model owners). - ```python -from diffusers import DiffusionPipeline import torch +from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "/", trust_remote_code=True, torch_dtype=torch.float16 -).to("cuda") - -prompt = "hello" - -# Text embeds -prompt_embeds, negative_embeds = pipeline.encode_prompt(prompt) - -# Keyframes generation (8x64x40, 2fps) -video_frames = pipeline( - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_embeds, - num_frames=8, - height=40, - width=64, - num_inference_steps=2, - guidance_scale=9.0, - output_type="pt" -).frames -``` - -As an additional reference, take a look at the repository structure of [stabilityai/japanese-stable-diffusion-xl](https://huggingface.co/stabilityai/japanese-stable-diffusion-xl/) which also uses the `trust_remote_code` feature. 
- -```python -from diffusers import DiffusionPipeline -import torch - -pipeline = DiffusionPipeline.from_pretrained( - "stabilityai/japanese-stable-diffusion-xl", trust_remote_code=True ) -pipeline.to("cuda") ``` + +> [!WARNING] +> As an additional precaution with `trust_remote_code=True`, we strongly encourage passing a commit hash to the `revision` argument in [`~DiffusionPipeline.from_pretrained`] to make sure the code hasn't been updated with new malicious code (unless you fully trust the model owners). + +## Resources + +- Take a look at Issue [#841](https://github.com/huggingface/diffusers/issues/841) for more context about why we're adding community pipelines to help everyone easily share their work without being slowed down. +- Check out the [stabilityai/japanese-stable-diffusion-xl](https://huggingface.co/stabilityai/japanese-stable-diffusion-xl/) repository for an additional example of a community pipeline that also uses the `trust_remote_code` feature. \ No newline at end of file From b60faf456bf93ff0454ed1691ff2f9dc6aecf362 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Fri, 22 Aug 2025 13:01:24 -0700 Subject: [PATCH 08/74] [docs] Pipeline callbacks (#12212) * init * review --- docs/source/en/api/pipelines/overview.md | 14 ++ docs/source/en/using-diffusers/callback.md | 241 +++------------------ 2 files changed, 43 insertions(+), 212 deletions(-) diff --git a/docs/source/en/api/pipelines/overview.md b/docs/source/en/api/pipelines/overview.md index f34262d37c..b5e3825fef 100644 --- a/docs/source/en/api/pipelines/overview.md +++ b/docs/source/en/api/pipelines/overview.md @@ -113,3 +113,17 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an ## PushToHubMixin [[autodoc]] utils.PushToHubMixin + +## Callbacks + +[[autodoc]] callbacks.PipelineCallback + +[[autodoc]] callbacks.SDCFGCutoffCallback + +[[autodoc]] callbacks.SDXLCFGCutoffCallback + +[[autodoc]] callbacks.SDXLControlnetCFGCutoffCallback + +[[autodoc]] callbacks.IPAdapterScaleCutoffCallback + +[[autodoc]] callbacks.SD3CFGCutoffCallback diff --git a/docs/source/en/using-diffusers/callback.md b/docs/source/en/using-diffusers/callback.md index e0fa885784..60b839805f 100644 --- a/docs/source/en/using-diffusers/callback.md +++ b/docs/source/en/using-diffusers/callback.md @@ -12,52 +12,37 @@ specific language governing permissions and limitations under the License. # Pipeline callbacks -The denoising loop of a pipeline can be modified with custom defined functions using the `callback_on_step_end` parameter. The callback function is executed at the end of each step, and modifies the pipeline attributes and variables for the next step. This is really useful for *dynamically* adjusting certain pipeline attributes or modifying tensor variables. This versatility allows for interesting use cases such as changing the prompt embeddings at each timestep, assigning different weights to the prompt embeddings, and editing the guidance scale. With callbacks, you can implement new features without modifying the underlying code! +A callback is a function that modifies [`DiffusionPipeline`] behavior and it is executed at the end of a denoising step. The changes are propagated to subsequent steps in the denoising process. It is useful for adjusting pipeline attributes or tensor variables to support new features without rewriting the underlying pipeline code. 
-> [!TIP] -> 🤗 Diffusers currently only supports `callback_on_step_end`, but feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) if you have a cool use-case and require a callback function with a different execution point! +Diffusers provides several callbacks in the pipeline [overview](../api/pipelines/overview#callbacks). -This guide will demonstrate how callbacks work by a few features you can implement with them. +To enable a callback, configure when the callback is executed after a certain number of denoising steps with one of the following arguments. -## Official callbacks +- `cutoff_step_ratio` specifies when a callback is activated as a percentage of the total denoising steps. +- `cutoff_step_index` specifies the exact step number a callback is activated. -We provide a list of callbacks you can plug into an existing pipeline and modify the denoising loop. This is the current list of official callbacks: +The example below uses `cutoff_step_ratio=0.4`, which means the callback is activated once denoising reaches 40% of the total inference steps. [`~callbacks.SDXLCFGCutoffCallback`] disables classifier-free guidance (CFG) after a certain number of steps, which can help save compute without significantly affecting performance. -- `SDCFGCutoffCallback`: Disables the CFG after a certain number of steps for all SD 1.5 pipelines, including text-to-image, image-to-image, inpaint, and controlnet. -- `SDXLCFGCutoffCallback`: Disables the CFG after a certain number of steps for all SDXL pipelines, including text-to-image, image-to-image, inpaint, and controlnet. -- `IPAdapterScaleCutoffCallback`: Disables the IP Adapter after a certain number of steps for all pipelines supporting IP-Adapter. +Define a callback with either of the `cutoff` arguments and pass it to the `callback_on_step_end` parameter in the pipeline. -> [!TIP] -> If you want to add a new official callback, feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) or [submit a PR](https://huggingface.co/docs/diffusers/main/en/conceptual/contribution#how-to-open-a-pr). - -To set up a callback, you need to specify the number of denoising steps after which the callback comes into effect. You can do so by using either one of these two arguments - -- `cutoff_step_ratio`: Float number with the ratio of the steps. -- `cutoff_step_index`: Integer number with the exact number of the step. - -```python +```py import torch - from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline from diffusers.callbacks import SDXLCFGCutoffCallback - callback = SDXLCFGCutoffCallback(cutoff_step_ratio=0.4) -# can also be used with cutoff_step_index +# if using cutoff_step_index # callback = SDXLCFGCutoffCallback(cutoff_step_ratio=None, cutoff_step_index=10) pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, - variant="fp16", -).to("cuda") + device_map="cuda" +) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, use_karras_sigmas=True) prompt = "a sports car at the road, best quality, high quality, high detail, 8k resolution" - -generator = torch.Generator(device="cpu").manual_seed(2628670641) - -out = pipeline( +output = pipeline( prompt=prompt, negative_prompt="", guidance_scale=6.5, @@ -65,83 +50,16 @@ out = pipeline( generator=generator, callback_on_step_end=callback, ) - -out.images[0].save("official_callback.png") ``` -
-
- generated image of a sports car at the road -
without SDXLCFGCutoffCallback
-
-
- generated image of a sports car at the road with cfg callback -
with SDXLCFGCutoffCallback
-
-
+If you want to add a new official callback, feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) or [submit a PR](https://huggingface.co/docs/diffusers/main/en/conceptual/contribution#how-to-open-a-pr). Otherwise, you can also create your own callback as shown below. -## Dynamic classifier-free guidance +## Early stopping -Dynamic classifier-free guidance (CFG) is a feature that allows you to disable CFG after a certain number of inference steps which can help you save compute with minimal cost to performance. The callback function for this should have the following arguments: - -- `pipeline` (or the pipeline instance) provides access to important properties such as `num_timesteps` and `guidance_scale`. You can modify these properties by updating the underlying attributes. For this example, you'll disable CFG by setting `pipeline._guidance_scale=0.0`. -- `step_index` and `timestep` tell you where you are in the denoising loop. Use `step_index` to turn off CFG after reaching 40% of `num_timesteps`. -- `callback_kwargs` is a dict that contains tensor variables you can modify during the denoising loop. It only includes variables specified in the `callback_on_step_end_tensor_inputs` argument, which is passed to the pipeline's `__call__` method. Different pipelines may use different sets of variables, so please check a pipeline's `_callback_tensor_inputs` attribute for the list of variables you can modify. Some common variables include `latents` and `prompt_embeds`. For this function, change the batch size of `prompt_embeds` after setting `guidance_scale=0.0` in order for it to work properly. - -Your callback function should look something like this: - -```python -def callback_dynamic_cfg(pipe, step_index, timestep, callback_kwargs): - # adjust the batch_size of prompt_embeds according to guidance_scale - if step_index == int(pipeline.num_timesteps * 0.4): - prompt_embeds = callback_kwargs["prompt_embeds"] - prompt_embeds = prompt_embeds.chunk(2)[-1] - - # update guidance_scale and prompt_embeds - pipeline._guidance_scale = 0.0 - callback_kwargs["prompt_embeds"] = prompt_embeds - return callback_kwargs -``` - -Now, you can pass the callback function to the `callback_on_step_end` parameter and the `prompt_embeds` to `callback_on_step_end_tensor_inputs`. +Early stopping is useful if you aren't happy with the intermediate results during generation. This callback sets a hardcoded stop point after which the pipeline terminates by setting the `_interrupt` attribute to `True`. ```py -import torch -from diffusers import StableDiffusionPipeline - -pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16) -pipeline = pipeline.to("cuda") - -prompt = "a photo of an astronaut riding a horse on mars" - -generator = torch.Generator(device="cuda").manual_seed(1) -out = pipeline( - prompt, - generator=generator, - callback_on_step_end=callback_dynamic_cfg, - callback_on_step_end_tensor_inputs=['prompt_embeds'] -) - -out.images[0].save("out_custom_cfg.png") -``` - -## Interrupt the diffusion process - -> [!TIP] -> The interruption callback is supported for text-to-image, image-to-image, and inpainting for the [StableDiffusionPipeline](../api/pipelines/stable_diffusion/overview) and [StableDiffusionXLPipeline](../api/pipelines/stable_diffusion/stable_diffusion_xl). 
- -Stopping the diffusion process early is useful when building UIs that work with Diffusers because it allows users to stop the generation process if they're unhappy with the intermediate results. You can incorporate this into your pipeline with a callback. - -This callback function should take the following arguments: `pipeline`, `i`, `t`, and `callback_kwargs` (this must be returned). Set the pipeline's `_interrupt` attribute to `True` to stop the diffusion process after a certain number of steps. You are also free to implement your own custom stopping logic inside the callback. - -In this example, the diffusion process is stopped after 10 steps even though `num_inference_steps` is set to 50. - -```python -from diffusers import StableDiffusionPipeline - -pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") -pipeline.enable_model_cpu_offload() -num_inference_steps = 50 +from diffusers import StableDiffusionXLPipeline def interrupt_callback(pipeline, i, t, callback_kwargs): stop_idx = 10 @@ -150,6 +68,11 @@ def interrupt_callback(pipeline, i, t, callback_kwargs): return callback_kwargs +pipeline = StableDiffusionXLPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5" +) +num_inference_steps = 50 + pipeline( "A photo of a cat", num_inference_steps=num_inference_steps, @@ -157,92 +80,11 @@ pipeline( ) ``` -## IP Adapter Cutoff +## Display intermediate images -IP Adapter is an image prompt adapter that can be used for diffusion models without any changes to the underlying model. We can use the IP Adapter Cutoff Callback to disable the IP Adapter after a certain number of steps. To set up the callback, you need to specify the number of denoising steps after which the callback comes into effect. You can do so by using either one of these two arguments: +Visualizing the intermediate images is useful for progress monitoring and assessing the quality of the generated content. This callback decodes the latent tensors at each step and converts them to images. -- `cutoff_step_ratio`: Float number with the ratio of the steps. -- `cutoff_step_index`: Integer number with the exact number of the step. 
- -We need to download the diffusion model and load the ip_adapter for it as follows: - -```py -from diffusers import AutoPipelineForText2Image -from diffusers.utils import load_image -import torch - -pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda") -pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") -pipeline.set_ip_adapter_scale(0.6) -``` -The setup for the callback should look something like this: - -```py - -from diffusers import AutoPipelineForText2Image -from diffusers.callbacks import IPAdapterScaleCutoffCallback -from diffusers.utils import load_image -import torch - - -pipeline = AutoPipelineForText2Image.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", - torch_dtype=torch.float16 -).to("cuda") - - -pipeline.load_ip_adapter( - "h94/IP-Adapter", - subfolder="sdxl_models", - weight_name="ip-adapter_sdxl.bin" -) - -pipeline.set_ip_adapter_scale(0.6) - - -callback = IPAdapterScaleCutoffCallback( - cutoff_step_ratio=None, - cutoff_step_index=5 -) - -image = load_image( - "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_diner.png" -) - -generator = torch.Generator(device="cuda").manual_seed(2628670641) - -images = pipeline( - prompt="a tiger sitting in a chair drinking orange juice", - ip_adapter_image=image, - negative_prompt="deformed, ugly, wrong proportion, low res, bad anatomy, worst quality, low quality", - generator=generator, - num_inference_steps=50, - callback_on_step_end=callback, -).images - -images[0].save("custom_callback_img.png") -``` - -
-[figure removed: tiger image comparison, "without IPAdapterScaleCutoffCallback" vs "with IPAdapterScaleCutoffCallback"]
- - -## Display image after each generation step - -> [!TIP] -> This tip was contributed by [asomoza](https://github.com/asomoza). - -Display an image after each generation step by accessing and converting the latents after each step into an image. The latent space is compressed to 128x128, so the images are also 128x128 which is useful for a quick preview. - -1. Use the function below to convert the SDXL latents (4 channels) to RGB tensors (3 channels) as explained in the [Explaining the SDXL latent space](https://huggingface.co/blog/TimothyAlexisVass/explaining-the-sdxl-latent-space) blog post. +[Convert](https://huggingface.co/blog/TimothyAlexisVass/explaining-the-sdxl-latent-space) the Stable Diffusion XL latents from latents (4 channels) to RGB tensors (3 channels). ```py def latents_to_rgb(latents): @@ -260,7 +102,7 @@ def latents_to_rgb(latents): return Image.fromarray(image_array) ``` -2. Create a function to decode and save the latents into an image. +Extract the latents and convert the first image in the batch to RGB. Save the image as a PNG file with the step number. ```py def decode_tensors(pipe, step, timestep, callback_kwargs): @@ -272,19 +114,18 @@ def decode_tensors(pipe, step, timestep, callback_kwargs): return callback_kwargs ``` -3. Pass the `decode_tensors` function to the `callback_on_step_end` parameter to decode the tensors after each step. You also need to specify what you want to modify in the `callback_on_step_end_tensor_inputs` parameter, which in this case are the latents. +Use the `callback_on_step_end_tensor_inputs` parameter to specify what input type to modify, which in this case are the latents. ```py -from diffusers import AutoPipelineForText2Image import torch from PIL import Image +from diffusers import AutoPipelineForText2Image pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, - variant="fp16", - use_safetensors=True -).to("cuda") + device_map="cuda" +) image = pipeline( prompt="A croissant shaped like a cute bear.", @@ -293,27 +134,3 @@ image = pipeline( callback_on_step_end_tensor_inputs=["latents"], ).images[0] ``` -
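To compare the saved previews at a glance, they can be stitched into a single contact sheet. A minimal sketch, assuming each preview was saved as `{step}.png`; adjust the filenames and step indices to whatever your `decode_tensors` callback actually writes:

```py
from PIL import Image

# Hypothetical filenames: adjust to the pattern used in your decode_tensors callback.
steps = [0, 19, 29, 39, 49]
previews = [Image.open(f"{step}.png") for step in steps]

# Paste the previews side by side into one image for quick visual comparison.
width, height = previews[0].size
sheet = Image.new("RGB", (width * len(previews), height))
for idx, preview in enumerate(previews):
    sheet.paste(preview, (idx * width, 0))
sheet.save("latent_previews.png")
```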
-[figure grid removed: latent preview images at step 0, step 19, step 29, step 39, and step 49]
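Callbacks can also be composed. A minimal sketch, assuming the `interrupt_callback` and `decode_tensors` functions defined earlier in this guide, that previews the latents at every step and still stops generation early:

```py
def preview_and_interrupt(pipe, step, timestep, callback_kwargs):
    # Save a preview of the current latents (see decode_tensors above).
    callback_kwargs = decode_tensors(pipe, step, timestep, callback_kwargs)
    # Then apply the early-stopping logic (see interrupt_callback above).
    return interrupt_callback(pipe, step, timestep, callback_kwargs)

image = pipeline(
    "A croissant shaped like a cute bear.",
    num_inference_steps=50,
    callback_on_step_end=preview_and_interrupt,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```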
From 561ab54de3d3aaa9007e76aeb3b15e8be3ed353f Mon Sep 17 00:00:00 2001 From: "Frank (Haofan) Wang" Date: Sat, 23 Aug 2025 05:00:01 +0800 Subject: [PATCH 09/74] Support ControlNet for Qwen-Image (#12215) * support qwen-image-cn-union --------- Co-authored-by: github-actions[bot] Co-authored-by: YiYi Xu --- docs/source/en/api/pipelines/qwenimage.md | 4 + src/diffusers/__init__.py | 6 + src/diffusers/models/__init__.py | 6 + src/diffusers/models/controlnets/__init__.py | 1 + .../controlnets/controlnet_qwenimage.py | 359 +++++++ .../transformers/transformer_qwenimage.py | 8 + .../modular_pipeline_utils.py | 2 +- src/diffusers/pipelines/__init__.py | 2 + src/diffusers/pipelines/qwenimage/__init__.py | 2 + .../pipeline_qwenimage_controlnet.py | 948 ++++++++++++++++++ src/diffusers/utils/dummy_pt_objects.py | 30 + .../dummy_torch_and_transformers_objects.py | 15 + 12 files changed, 1382 insertions(+), 1 deletion(-) create mode 100644 src/diffusers/models/controlnets/controlnet_qwenimage.py create mode 100644 src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py diff --git a/docs/source/en/api/pipelines/qwenimage.md b/docs/source/en/api/pipelines/qwenimage.md index 4edfc6d4d6..518938131b 100644 --- a/docs/source/en/api/pipelines/qwenimage.md +++ b/docs/source/en/api/pipelines/qwenimage.md @@ -120,6 +120,10 @@ The `guidance_scale` parameter in the pipeline is there to support future guidan - all - __call__ +## QwenImaggeControlNetPipeline + - all + - __call__ + ## QwenImagePipelineOutput [[autodoc]] pipelines.qwenimage.pipeline_output.QwenImagePipelineOutput \ No newline at end of file diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 3f0f87b926..a606941f1d 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -218,6 +218,8 @@ else: "OmniGenTransformer2DModel", "PixArtTransformer2DModel", "PriorTransformer", + "QwenImageControlNetModel", + "QwenImageMultiControlNetModel", "QwenImageTransformer2DModel", "SanaControlNetModel", "SanaTransformer2DModel", @@ -491,6 +493,7 @@ else: "PixArtAlphaPipeline", "PixArtSigmaPAGPipeline", "PixArtSigmaPipeline", + "QwenImageControlNetPipeline", "QwenImageEditPipeline", "QwenImageImg2ImgPipeline", "QwenImageInpaintPipeline", @@ -885,6 +888,8 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: OmniGenTransformer2DModel, PixArtTransformer2DModel, PriorTransformer, + QwenImageControlNetModel, + QwenImageMultiControlNetModel, QwenImageTransformer2DModel, SanaControlNetModel, SanaTransformer2DModel, @@ -1128,6 +1133,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: PixArtAlphaPipeline, PixArtSigmaPAGPipeline, PixArtSigmaPipeline, + QwenImageControlNetPipeline, QwenImageEditPipeline, QwenImageImg2ImgPipeline, QwenImageInpaintPipeline, diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index c432640362..49ac2a1c56 100755 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -52,6 +52,10 @@ if is_torch_available(): "HunyuanDiT2DControlNetModel", "HunyuanDiT2DMultiControlNetModel", ] + _import_structure["controlnets.controlnet_qwenimage"] = [ + "QwenImageControlNetModel", + "QwenImageMultiControlNetModel", + ] _import_structure["controlnets.controlnet_sana"] = ["SanaControlNetModel"] _import_structure["controlnets.controlnet_sd3"] = ["SD3ControlNetModel", "SD3MultiControlNetModel"] _import_structure["controlnets.controlnet_sparsectrl"] = ["SparseControlNetModel"] @@ -148,6 +152,8 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: HunyuanDiT2DMultiControlNetModel, 
MultiControlNetModel, MultiControlNetUnionModel, + QwenImageControlNetModel, + QwenImageMultiControlNetModel, SanaControlNetModel, SD3ControlNetModel, SD3MultiControlNetModel, diff --git a/src/diffusers/models/controlnets/__init__.py b/src/diffusers/models/controlnets/__init__.py index 90ef438d25..7ce352879d 100644 --- a/src/diffusers/models/controlnets/__init__.py +++ b/src/diffusers/models/controlnets/__init__.py @@ -9,6 +9,7 @@ if is_torch_available(): HunyuanDiT2DControlNetModel, HunyuanDiT2DMultiControlNetModel, ) + from .controlnet_qwenimage import QwenImageControlNetModel, QwenImageMultiControlNetModel from .controlnet_sana import SanaControlNetModel from .controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel from .controlnet_sparsectrl import ( diff --git a/src/diffusers/models/controlnets/controlnet_qwenimage.py b/src/diffusers/models/controlnets/controlnet_qwenimage.py new file mode 100644 index 0000000000..7c4955eb58 --- /dev/null +++ b/src/diffusers/models/controlnets/controlnet_qwenimage.py @@ -0,0 +1,359 @@ +# Copyright 2025 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import FromOriginalModelMixin, PeftAdapterMixin +from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers +from ..attention_processor import AttentionProcessor +from ..cache_utils import CacheMixin +from ..controlnets.controlnet import zero_module +from ..modeling_outputs import Transformer2DModelOutput +from ..modeling_utils import ModelMixin +from ..transformers.transformer_qwenimage import ( + QwenEmbedRope, + QwenImageTransformerBlock, + QwenTimestepProjEmbeddings, + RMSNorm, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class QwenImageControlNetOutput(BaseOutput): + controlnet_block_samples: Tuple[torch.Tensor] + + +class QwenImageControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin): + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + patch_size: int = 2, + in_channels: int = 64, + out_channels: Optional[int] = 16, + num_layers: int = 60, + attention_head_dim: int = 128, + num_attention_heads: int = 24, + joint_attention_dim: int = 3584, + axes_dims_rope: Tuple[int, int, int] = (16, 56, 56), + extra_condition_channels: int = 0, # for controlnet-inpainting + ): + super().__init__() + self.out_channels = out_channels or in_channels + self.inner_dim = num_attention_heads * attention_head_dim + + self.pos_embed = QwenEmbedRope(theta=10000, axes_dim=list(axes_dims_rope), scale_rope=True) + + self.time_text_embed = QwenTimestepProjEmbeddings(embedding_dim=self.inner_dim) + + self.txt_norm = 
RMSNorm(joint_attention_dim, eps=1e-6) + + self.img_in = nn.Linear(in_channels, self.inner_dim) + self.txt_in = nn.Linear(joint_attention_dim, self.inner_dim) + + self.transformer_blocks = nn.ModuleList( + [ + QwenImageTransformerBlock( + dim=self.inner_dim, + num_attention_heads=num_attention_heads, + attention_head_dim=attention_head_dim, + ) + for _ in range(num_layers) + ] + ) + + # controlnet_blocks + self.controlnet_blocks = nn.ModuleList([]) + for _ in range(len(self.transformer_blocks)): + self.controlnet_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim))) + self.controlnet_x_embedder = zero_module( + torch.nn.Linear(in_channels + extra_condition_channels, self.inner_dim) + ) + + self.gradient_checkpointing = False + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self): + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor() + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
+ ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + @classmethod + def from_transformer( + cls, + transformer, + num_layers: int = 5, + attention_head_dim: int = 128, + num_attention_heads: int = 24, + load_weights_from_transformer=True, + extra_condition_channels: int = 0, + ): + config = dict(transformer.config) + config["num_layers"] = num_layers + config["attention_head_dim"] = attention_head_dim + config["num_attention_heads"] = num_attention_heads + config["extra_condition_channels"] = extra_condition_channels + + controlnet = cls.from_config(config) + + if load_weights_from_transformer: + controlnet.pos_embed.load_state_dict(transformer.pos_embed.state_dict()) + controlnet.time_text_embed.load_state_dict(transformer.time_text_embed.state_dict()) + controlnet.img_in.load_state_dict(transformer.img_in.state_dict()) + controlnet.txt_in.load_state_dict(transformer.txt_in.state_dict()) + controlnet.transformer_blocks.load_state_dict(transformer.transformer_blocks.state_dict(), strict=False) + controlnet.controlnet_x_embedder = zero_module(controlnet.controlnet_x_embedder) + + return controlnet + + def forward( + self, + hidden_states: torch.Tensor, + controlnet_cond: torch.Tensor, + conditioning_scale: float = 1.0, + encoder_hidden_states: torch.Tensor = None, + encoder_hidden_states_mask: torch.Tensor = None, + timestep: torch.LongTensor = None, + img_shapes: Optional[List[Tuple[int, int, int]]] = None, + txt_seq_lens: Optional[List[int]] = None, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + return_dict: bool = True, + ) -> Union[torch.FloatTensor, Transformer2DModelOutput]: + """ + The [`FluxTransformer2DModel`] forward method. + + Args: + hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): + Input `hidden_states`. + controlnet_cond (`torch.Tensor`): + The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. + conditioning_scale (`float`, defaults to `1.0`): + The scale factor for ControlNet outputs. + encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`): + Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. + pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected + from the embeddings of input conditions. + timestep ( `torch.LongTensor`): + Used to indicate denoising step. + block_controlnet_hidden_states: (`list` of `torch.Tensor`): + A list of tensors that if specified are added to the residuals of transformer blocks. + joint_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain + tuple. 
+ + Returns: + If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a + `tuple` where the first element is the sample tensor. + """ + if joint_attention_kwargs is not None: + joint_attention_kwargs = joint_attention_kwargs.copy() + lora_scale = joint_attention_kwargs.pop("scale", 1.0) + else: + lora_scale = 1.0 + + if USE_PEFT_BACKEND: + # weight the lora layers by setting `lora_scale` for each PEFT layer + scale_lora_layers(self, lora_scale) + else: + if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None: + logger.warning( + "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective." + ) + hidden_states = self.img_in(hidden_states) + + # add + hidden_states = hidden_states + self.controlnet_x_embedder(controlnet_cond) + + temb = self.time_text_embed(timestep, hidden_states) + + image_rotary_emb = self.pos_embed(img_shapes, txt_seq_lens, device=hidden_states.device) + + timestep = timestep.to(hidden_states.dtype) + encoder_hidden_states = self.txt_norm(encoder_hidden_states) + encoder_hidden_states = self.txt_in(encoder_hidden_states) + + block_samples = () + for index_block, block in enumerate(self.transformer_blocks): + if torch.is_grad_enabled() and self.gradient_checkpointing: + encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( + block, + hidden_states, + encoder_hidden_states, + encoder_hidden_states_mask, + temb, + image_rotary_emb, + ) + + else: + encoder_hidden_states, hidden_states = block( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + encoder_hidden_states_mask=encoder_hidden_states_mask, + temb=temb, + image_rotary_emb=image_rotary_emb, + joint_attention_kwargs=joint_attention_kwargs, + ) + block_samples = block_samples + (hidden_states,) + + # controlnet block + controlnet_block_samples = () + for block_sample, controlnet_block in zip(block_samples, self.controlnet_blocks): + block_sample = controlnet_block(block_sample) + controlnet_block_samples = controlnet_block_samples + (block_sample,) + + # scaling + controlnet_block_samples = [sample * conditioning_scale for sample in controlnet_block_samples] + controlnet_block_samples = None if len(controlnet_block_samples) == 0 else controlnet_block_samples + + if USE_PEFT_BACKEND: + # remove `lora_scale` from each PEFT layer + unscale_lora_layers(self, lora_scale) + + if not return_dict: + return controlnet_block_samples + + return QwenImageControlNetOutput( + controlnet_block_samples=controlnet_block_samples, + ) + + +class QwenImageMultiControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin): + r""" + `QwenImageMultiControlNetModel` wrapper class for Multi-QwenImageControlNetModel + + This module is a wrapper for multiple instances of the `QwenImageControlNetModel`. The `forward()` API is designed + to be compatible with `QwenImageControlNetModel`. + + Args: + controlnets (`List[QwenImageControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. You must set multiple + `QwenImageControlNetModel` as a list. 
+ """ + + def __init__(self, controlnets): + super().__init__() + self.nets = nn.ModuleList(controlnets) + + def forward( + self, + hidden_states: torch.FloatTensor, + controlnet_cond: List[torch.tensor], + conditioning_scale: List[float], + encoder_hidden_states: torch.Tensor = None, + encoder_hidden_states_mask: torch.Tensor = None, + timestep: torch.LongTensor = None, + img_shapes: Optional[List[Tuple[int, int, int]]] = None, + txt_seq_lens: Optional[List[int]] = None, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + return_dict: bool = True, + ) -> Union[QwenImageControlNetOutput, Tuple]: + # ControlNet-Union with multiple conditions + # only load one ControlNet for saving memories + if len(self.nets) == 1: + controlnet = self.nets[0] + + for i, (image, scale) in enumerate(zip(controlnet_cond, conditioning_scale)): + block_samples = controlnet( + hidden_states=hidden_states, + controlnet_cond=image, + conditioning_scale=scale, + encoder_hidden_states=encoder_hidden_states, + encoder_hidden_states_mask=encoder_hidden_states_mask, + timestep=timestep, + img_shapes=img_shapes, + txt_seq_lens=txt_seq_lens, + joint_attention_kwargs=joint_attention_kwargs, + return_dict=return_dict, + ) + + # merge samples + if i == 0: + control_block_samples = block_samples + else: + if block_samples is not None and control_block_samples is not None: + control_block_samples = [ + control_block_sample + block_sample + for control_block_sample, block_sample in zip(control_block_samples, block_samples) + ] + else: + raise ValueError("QwenImageMultiControlNetModel only supports a single controlnet-union now.") + + return control_block_samples diff --git a/src/diffusers/models/transformers/transformer_qwenimage.py b/src/diffusers/models/transformers/transformer_qwenimage.py index 3a417c4693..241ac7afcd 100644 --- a/src/diffusers/models/transformers/transformer_qwenimage.py +++ b/src/diffusers/models/transformers/transformer_qwenimage.py @@ -16,6 +16,7 @@ import functools import math from typing import Any, Dict, List, Optional, Tuple, Union +import numpy as np import torch import torch.nn as nn import torch.nn.functional as F @@ -552,6 +553,7 @@ class QwenImageTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, Fro txt_seq_lens: Optional[List[int]] = None, guidance: torch.Tensor = None, # TODO: this should probably be removed attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_block_samples=None, return_dict: bool = True, ) -> Union[torch.Tensor, Transformer2DModelOutput]: """ @@ -631,6 +633,12 @@ class QwenImageTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, Fro joint_attention_kwargs=attention_kwargs, ) + # controlnet residual + if controlnet_block_samples is not None: + interval_control = len(self.transformer_blocks) / len(controlnet_block_samples) + interval_control = int(np.ceil(interval_control)) + hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control] + # Use only the image part (hidden_states) from the dual-stream blocks hidden_states = self.norm_out(hidden_states, temb) output = self.proj_out(hidden_states) diff --git a/src/diffusers/modular_pipelines/modular_pipeline_utils.py b/src/diffusers/modular_pipelines/modular_pipeline_utils.py index 9118f13aa0..b151268686 100644 --- a/src/diffusers/modular_pipelines/modular_pipeline_utils.py +++ b/src/diffusers/modular_pipelines/modular_pipeline_utils.py @@ -209,7 +209,7 @@ class ComponentSpec: # Get all loading fields in order loading_fields = cls.loading_fields() - result = {f: 
None for f in loading_fields} + result = dict.fromkeys(loading_fields) if load_id == "null": return result diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index de8eefd5ff..b3cfc62287 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -393,6 +393,7 @@ else: "QwenImageImg2ImgPipeline", "QwenImageInpaintPipeline", "QwenImageEditPipeline", + "QwenImageControlNetPipeline", ] try: if not is_onnx_available(): @@ -712,6 +713,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pia import PIAPipeline from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline from .qwenimage import ( + QwenImageControlNetPipeline, QwenImageEditPipeline, QwenImageImg2ImgPipeline, QwenImageInpaintPipeline, diff --git a/src/diffusers/pipelines/qwenimage/__init__.py b/src/diffusers/pipelines/qwenimage/__init__.py index 4b64474dda..bcf0911e0f 100644 --- a/src/diffusers/pipelines/qwenimage/__init__.py +++ b/src/diffusers/pipelines/qwenimage/__init__.py @@ -24,6 +24,7 @@ except OptionalDependencyNotAvailable: else: _import_structure["modeling_qwenimage"] = ["ReduxImageEncoder"] _import_structure["pipeline_qwenimage"] = ["QwenImagePipeline"] + _import_structure["pipeline_qwenimage_controlnet"] = ["QwenImageControlNetPipeline"] _import_structure["pipeline_qwenimage_edit"] = ["QwenImageEditPipeline"] _import_structure["pipeline_qwenimage_img2img"] = ["QwenImageImg2ImgPipeline"] _import_structure["pipeline_qwenimage_inpaint"] = ["QwenImageInpaintPipeline"] @@ -36,6 +37,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_qwenimage import QwenImagePipeline + from .pipeline_qwenimage_controlnet import QwenImageControlNetPipeline from .pipeline_qwenimage_edit import QwenImageEditPipeline from .pipeline_qwenimage_img2img import QwenImageImg2ImgPipeline from .pipeline_qwenimage_inpaint import QwenImageInpaintPipeline diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py new file mode 100644 index 0000000000..6b383fa173 --- /dev/null +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py @@ -0,0 +1,948 @@ +# Copyright 2025 Qwen-Image Team, InstantX Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import QwenImageLoraLoaderMixin +from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel +from ...models.controlnets.controlnet_qwenimage import QwenImageControlNetModel, QwenImageMultiControlNetModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import QwenImagePipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers.utils import load_image + >>> from diffusers import QwenImageControlNetModel, QwenImageMultiControlNetModel, QwenImageControlNetPipeline + + >>> # QwenImageControlNetModel + >>> controlnet = QwenImageControlNetModel.from_pretrained( + ... "InstantX/Qwen-Image-ControlNet-Union", torch_dtype=torch.bfloat16 + ... ) + >>> pipe = QwenImageControlNetPipeline.from_pretrained( + ... "Qwen/Qwen-Image", controlnet=controlnet, torch_dtype=torch.bfloat16 + ... ) + >>> pipe.to("cuda") + >>> prompt = "Aesthetics art, traditional asian pagoda, elaborate golden accents, sky blue and white color palette, swirling cloud pattern, digital illustration, east asian architecture, ornamental rooftop, intricate detailing on building, cultural representation." + >>> negative_prompt = " " + >>> control_image = load_image( + ... "https://huggingface.co/InstantX/Qwen-Image-ControlNet-Union/resolve/main/conds/canny.png" + ... ) + >>> # Depending on the variant being used, the pipeline call will slightly vary. + >>> # Refer to the pipeline documentation for more details. + >>> image = pipe( + ... prompt, + ... negative_prompt=negative_prompt, + ... control_image=control_image, + ... controlnet_conditioning_scale=1.0, + ... num_inference_steps=30, + ... true_cfg_scale=4.0, + ... ).images[0] + >>> image.save("qwenimage_cn_union.png") + + >>> # QwenImageMultiControlNetModel + >>> controlnet = QwenImageControlNetModel.from_pretrained( + ... "InstantX/Qwen-Image-ControlNet-Union", torch_dtype=torch.bfloat16 + ... ) + >>> controlnet = QwenImageMultiControlNetModel([controlnet]) + >>> pipe = QwenImageControlNetPipeline.from_pretrained( + ... "Qwen/Qwen-Image", controlnet=controlnet, torch_dtype=torch.bfloat16 + ... ) + >>> pipe.to("cuda") + >>> prompt = "Aesthetics art, traditional asian pagoda, elaborate golden accents, sky blue and white color palette, swirling cloud pattern, digital illustration, east asian architecture, ornamental rooftop, intricate detailing on building, cultural representation." + >>> negative_prompt = " " + >>> control_image = load_image( + ... "https://huggingface.co/InstantX/Qwen-Image-ControlNet-Union/resolve/main/conds/canny.png" + ... ) + >>> # Depending on the variant being used, the pipeline call will slightly vary. + >>> # Refer to the pipeline documentation for more details. + >>> image = pipe( + ... prompt, + ... negative_prompt=negative_prompt, + ... control_image=[control_image, control_image], + ... 
controlnet_conditioning_scale=[0.5, 0.5], + ... num_inference_steps=30, + ... true_cfg_scale=4.0, + ... ).images[0] + >>> image.save("qwenimage_cn_union_multi.png") + ``` +""" + + +# Coped from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.15, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + r""" + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class QwenImageControlNetPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): + r""" + The QwenImage pipeline for text-to-image generation. + + Args: + transformer ([`QwenImageTransformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`Qwen2.5-VL-7B-Instruct`]): + [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct), specifically the + [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) variant. + tokenizer (`QwenTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). + """ + + model_cpu_offload_seq = "text_encoder->transformer->vae" + _callback_tensor_inputs = ["latents", "prompt_embeds"] + + def __init__( + self, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKLQwenImage, + text_encoder: Qwen2_5_VLForConditionalGeneration, + tokenizer: Qwen2Tokenizer, + transformer: QwenImageTransformer2DModel, + controlnet: Union[QwenImageControlNetModel, QwenImageMultiControlNetModel], + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + transformer=transformer, + scheduler=scheduler, + controlnet=controlnet, + ) + self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 + # QwenImage latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible + # by the patch size. 
So the vae scale factor is multiplied by the patch size to account for this + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) + self.tokenizer_max_length = 1024 + self.prompt_template_encode = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" + self.prompt_template_encode_start_idx = 34 + self.default_sample_size = 128 + + # Coped from diffusers.pipelines.qwenimage.pipeline_qwenimage.extract_masked_hidden + def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor): + bool_mask = mask.bool() + valid_lengths = bool_mask.sum(dim=1) + selected = hidden_states[bool_mask] + split_result = torch.split(selected, valid_lengths.tolist(), dim=0) + + return split_result + + # Coped from diffusers.pipelines.qwenimage.pipeline_qwenimage.get_qwen_prompt_embeds + def _get_qwen_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + + template = self.prompt_template_encode + drop_idx = self.prompt_template_encode_start_idx + txt = [template.format(e) for e in prompt] + txt_tokens = self.tokenizer( + txt, max_length=self.tokenizer_max_length + drop_idx, padding=True, truncation=True, return_tensors="pt" + ).to(self.device) + encoder_hidden_states = self.text_encoder( + input_ids=txt_tokens.input_ids, + attention_mask=txt_tokens.attention_mask, + output_hidden_states=True, + ) + hidden_states = encoder_hidden_states.hidden_states[-1] + split_hidden_states = self._extract_masked_hidden(hidden_states, txt_tokens.attention_mask) + split_hidden_states = [e[drop_idx:] for e in split_hidden_states] + attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in split_hidden_states] + max_seq_len = max([e.size(0) for e in split_hidden_states]) + prompt_embeds = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states] + ) + encoder_attention_mask = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list] + ) + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + return prompt_embeds, encoder_attention_mask + + # Coped from diffusers.pipelines.qwenimage.pipeline_qwenimage.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_mask: Optional[torch.Tensor] = None, + max_sequence_length: int = 1024, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. 
+ """ + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) if prompt_embeds is None else prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_embeds, prompt_embeds_mask = self._get_qwen_prompt_embeds(prompt, device) + + _, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_images_per_prompt, 1) + prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_images_per_prompt, seq_len) + + return prompt_embeds, prompt_embeds_mask + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_embeds_mask=None, + negative_prompt_embeds_mask=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: + logger.warning( + f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and prompt_embeds_mask is None: + raise ValueError( + "If `prompt_embeds` are provided, `prompt_embeds_mask` also have to be passed. Make sure to generate `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`." + ) + if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also have to be passed. Make sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate `negative_prompt_embeds`." 
+ ) + + if max_sequence_length is not None and max_sequence_length > 1024: + raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}") + + @staticmethod + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._pack_latents + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + + return latents + + @staticmethod + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._unpack_latents + def _unpack_latents(latents, height, width, vae_scale_factor): + batch_size, num_patches, channels = latents.shape + + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. + height = 2 * (int(height) // (vae_scale_factor * 2)) + width = 2 * (int(width) // (vae_scale_factor * 2)) + + latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (2 * 2), 1, height, width) + + return latents + + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_tiling() + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. + height = 2 * (int(height) // (self.vae_scale_factor * 2)) + width = 2 * (int(width) // (self.vae_scale_factor * 2)) + + shape = (batch_size, 1, num_channels_latents, height, width) + + if latents is not None: + return latents.to(device=device, dtype=dtype) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) + + return latents + + # Copied from diffusers.pipelines.controlnet_sd3.pipeline_stable_diffusion_3_controlnet.StableDiffusion3ControlNetPipeline.prepare_image + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + if isinstance(image, torch.Tensor): + pass + else: + image = self.image_processor.preprocess(image, height=height, width=width) + + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def attention_kwargs(self): + return self._attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def current_timestep(self): + return self._current_timestep + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + negative_prompt: Union[str, List[str]] = None, + true_cfg_scale: float = 4.0, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + sigmas: Optional[List[float]] = None, + guidance_scale: float = 1.0, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + control_image: PipelineImageInput = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_mask: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is + not greater than `1`). + true_cfg_scale (`float`, *optional*, defaults to 1.0): + When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. 
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 3.5): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. + of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to + the text `prompt`, usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.qwenimage.QwenImagePipelineOutput`] instead of a plain tuple. + attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. 
You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.qwenimage.QwenImagePipelineOutput`] or `tuple`: + [`~pipelines.qwenimage.QwenImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is a list with the generated images. + """ + + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(control_image) if isinstance(self.controlnet, QwenImageMultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_embeds_mask=prompt_embeds_mask, + negative_prompt_embeds_mask=negative_prompt_embeds_mask, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._attention_kwargs = attention_kwargs + self._current_timestep = None + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + has_neg_prompt = negative_prompt is not None or ( + negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None + ) + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt + prompt_embeds, prompt_embeds_mask = self.encode_prompt( + prompt=prompt, + prompt_embeds=prompt_embeds, + prompt_embeds_mask=prompt_embeds_mask, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + if do_true_cfg: + negative_prompt_embeds, negative_prompt_embeds_mask = self.encode_prompt( + prompt=negative_prompt, + prompt_embeds=negative_prompt_embeds, + prompt_embeds_mask=negative_prompt_embeds_mask, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + # 3. 
Prepare control image + num_channels_latents = self.transformer.config.in_channels // 4 + if isinstance(self.controlnet, QwenImageControlNetModel): + control_image = self.prepare_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=self.vae.dtype, + ) + height, width = control_image.shape[-2:] + + if control_image.ndim == 4: + control_image = control_image.unsqueeze(2) + + # vae encode + self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) + latents_mean = (torch.tensor(self.vae.config.latents_mean).view(1, self.vae.config.z_dim, 1, 1, 1)).to( + device + ) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( + device + ) + + control_image = retrieve_latents(self.vae.encode(control_image), generator=generator) + control_image = (control_image - latents_mean) * latents_std + + control_image = control_image.permute(0, 2, 1, 3, 4) + + # pack + control_image = self._pack_latents( + control_image, + batch_size=control_image.shape[0], + num_channels_latents=num_channels_latents, + height=control_image.shape[3], + width=control_image.shape[4], + ).to(dtype=prompt_embeds.dtype, device=device) + + else: + if isinstance(self.controlnet, QwenImageMultiControlNetModel): + control_images = [] + for control_image_ in control_image: + control_image_ = self.prepare_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=self.vae.dtype, + ) + + height, width = control_image_.shape[-2:] + + if control_image_.ndim == 4: + control_image_ = control_image_.unsqueeze(2) + + # vae encode + self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, self.vae.config.z_dim, 1, 1, 1) + ).to(device) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view( + 1, self.vae.config.z_dim, 1, 1, 1 + ).to(device) + + control_image_ = retrieve_latents(self.vae.encode(control_image_), generator=generator) + control_image_ = (control_image_ - latents_mean) * latents_std + + control_image_ = control_image_.permute(0, 2, 1, 3, 4) + + # pack + control_image_ = self._pack_latents( + control_image_, + batch_size=control_image_.shape[0], + num_channels_latents=num_channels_latents, + height=control_image_.shape[3], + width=control_image_.shape[4], + ).to(dtype=prompt_embeds.dtype, device=device) + + control_images.append(control_image_) + + control_image = control_images + + # 4. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + img_shapes = [(1, height // self.vae_scale_factor // 2, width // self.vae_scale_factor // 2)] * batch_size + + # 5. 
Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas + image_seq_len = latents.shape[1] + mu = calculate_shift( + image_seq_len, + self.scheduler.config.get("base_image_seq_len", 256), + self.scheduler.config.get("max_image_seq_len", 4096), + self.scheduler.config.get("base_shift", 0.5), + self.scheduler.config.get("max_shift", 1.15), + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + sigmas=sigmas, + mu=mu, + ) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(self.controlnet, QwenImageControlNetModel) else keeps) + + # handle guidance + if self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) + else: + guidance = None + + if self.attention_kwargs is None: + self._attention_kwargs = {} + + # 6. Denoising loop + self.scheduler.set_begin_index(0) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + self._current_timestep = t + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latents.shape[0]).to(latents.dtype) + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + # controlnet + controlnet_block_samples = self.controlnet( + hidden_states=latents, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + timestep=timestep / 1000, + encoder_hidden_states=prompt_embeds, + encoder_hidden_states_mask=prompt_embeds_mask, + img_shapes=img_shapes, + txt_seq_lens=prompt_embeds_mask.sum(dim=1).tolist(), + return_dict=False, + ) + + with self.transformer.cache_context("cond"): + noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + encoder_hidden_states=prompt_embeds, + encoder_hidden_states_mask=prompt_embeds_mask, + img_shapes=img_shapes, + txt_seq_lens=prompt_embeds_mask.sum(dim=1).tolist(), + controlnet_block_samples=controlnet_block_samples, + attention_kwargs=self.attention_kwargs, + return_dict=False, + )[0] + + if do_true_cfg: + with self.transformer.cache_context("uncond"): + neg_noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + encoder_hidden_states_mask=negative_prompt_embeds_mask, + encoder_hidden_states=negative_prompt_embeds, + img_shapes=img_shapes, + txt_seq_lens=negative_prompt_embeds_mask.sum(dim=1).tolist(), + controlnet_block_samples=controlnet_block_samples, + attention_kwargs=self.attention_kwargs, + return_dict=False, + )[0] + comb_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred) + + cond_norm = torch.norm(noise_pred, dim=-1, keepdim=True) + noise_norm = torch.norm(comb_pred, dim=-1, keepdim=True) + noise_pred = comb_pred * (cond_norm / noise_norm) + + # 
compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + self._current_timestep = None + if output_type == "latent": + image = latents + else: + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = latents.to(self.vae.dtype) + latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, self.vae.config.z_dim, 1, 1, 1) + .to(latents.device, latents.dtype) + ) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( + latents.device, latents.dtype + ) + latents = latents / latents_std + latents_mean + image = self.vae.decode(latents, return_dict=False)[0][:, :, 0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return QwenImagePipelineOutput(images=image) diff --git a/src/diffusers/utils/dummy_pt_objects.py b/src/diffusers/utils/dummy_pt_objects.py index 20380a449f..bbb9712496 100644 --- a/src/diffusers/utils/dummy_pt_objects.py +++ b/src/diffusers/utils/dummy_pt_objects.py @@ -1083,6 +1083,36 @@ class PriorTransformer(metaclass=DummyObject): requires_backends(cls, ["torch"]) +class QwenImageControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class QwenImageMultiControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + class QwenImageTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 1885dc03bb..22dfc5fcca 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -1757,6 +1757,21 @@ class PixArtSigmaPipeline(metaclass=DummyObject): requires_backends(cls, ["torch", "transformers"]) +class QwenImageControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, 
*args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class QwenImageEditPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] From 673d4357ff9f085f6f2cd9eebaff23fd1fd9990a Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Sat, 23 Aug 2025 04:48:32 +0530 Subject: [PATCH 10/74] add attentionmixin to qwen image (#12219) --- src/diffusers/models/transformers/transformer_qwenimage.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/diffusers/models/transformers/transformer_qwenimage.py b/src/diffusers/models/transformers/transformer_qwenimage.py index 241ac7afcd..846add8906 100644 --- a/src/diffusers/models/transformers/transformer_qwenimage.py +++ b/src/diffusers/models/transformers/transformer_qwenimage.py @@ -25,7 +25,7 @@ from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import maybe_allow_in_graph -from ..attention import FeedForward +from ..attention import AttentionMixin, FeedForward from ..attention_dispatch import dispatch_attention_fn from ..attention_processor import Attention from ..cache_utils import CacheMixin @@ -470,7 +470,9 @@ class QwenImageTransformerBlock(nn.Module): return encoder_hidden_states, hidden_states -class QwenImageTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin): +class QwenImageTransformer2DModel( + ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin +): """ The Transformer model introduced in Qwen. 
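For context on what adding `AttentionMixin` to `QwenImageTransformer2DModel` enables, the sketch below shows the kind of usage the mixin is meant to support. It assumes `AttentionMixin` contributes the same `attn_processors` property and `set_attn_processor` method it provides for other diffusers transformers; treat it as an illustration under those assumptions, not a tested snippet from this patch.

```py
import torch
from diffusers import QwenImageTransformer2DModel

# Sketch only: assumes AttentionMixin exposes `attn_processors` and
# `set_attn_processor`, as it does for other transformer models in diffusers.
transformer = QwenImageTransformer2DModel.from_pretrained(
    "Qwen/Qwen-Image", subfolder="transformer", torch_dtype=torch.bfloat16
)

# Inspect the per-layer attention processors registered on the model.
print(len(transformer.attn_processors))

# Re-register the current processors; a dict mapping layer names to custom
# processors (or a single processor instance) could be passed here instead.
transformer.set_attn_processor(transformer.attn_processors)
```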
From 9a7ae77a4eda5b4f819fd22ce9b713fb79993201 Mon Sep 17 00:00:00 2001 From: Aishwarya Badlani <41635755+Aishwarya0811@users.noreply.github.com> Date: Sat, 23 Aug 2025 12:22:09 +0500 Subject: [PATCH 11/74] =?UTF-8?q?Fix=20PyTorch=202.3.1=20compatibility:=20?= =?UTF-8?q?add=20version=20guard=20for=20torch.library.=E2=80=A6=20(#12206?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix PyTorch 2.3.1 compatibility: add version guard for torch.library.custom_op - Add hasattr() check for torch.library.custom_op and register_fake - These functions were added in PyTorch 2.4, causing import failures in 2.3.1 - Both decorators and functions are now properly guarded with version checks - Maintains backward compatibility while preserving functionality Fixes #12195 * Use dummy decorators approach for PyTorch version compatibility - Replace hasattr check with version string comparison - Add no-op decorator functions for PyTorch < 2.4.0 - Follows pattern from #11941 as suggested by reviewer - Maintains cleaner code structure without indentation changes * Update src/diffusers/models/attention_dispatch.py Update all the decorator usages Co-authored-by: Aryan * Update src/diffusers/models/attention_dispatch.py Co-authored-by: Aryan * Update src/diffusers/models/attention_dispatch.py Co-authored-by: Aryan * Update src/diffusers/models/attention_dispatch.py Co-authored-by: Aryan * Move version check to top of file and use private naming as requested * Apply style fixes --------- Co-authored-by: Aryan Co-authored-by: Aryan Co-authored-by: github-actions[bot] --- src/diffusers/models/attention_dispatch.py | 30 ++++++++++++++++++---- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/src/diffusers/models/attention_dispatch.py b/src/diffusers/models/attention_dispatch.py index 7cc30e47ab..6a05aac215 100644 --- a/src/diffusers/models/attention_dispatch.py +++ b/src/diffusers/models/attention_dispatch.py @@ -110,6 +110,27 @@ if _CAN_USE_XFORMERS_ATTN: else: xops = None +# Version guard for PyTorch compatibility - custom_op was added in PyTorch 2.4 +if torch.__version__ >= "2.4.0": + _custom_op = torch.library.custom_op + _register_fake = torch.library.register_fake +else: + + def custom_op_no_op(name, fn=None, /, *, mutates_args, device_types=None, schema=None): + def wrap(func): + return func + + return wrap if fn is None else fn + + def register_fake_no_op(op, fn=None, /, *, lib=None, _stacklevel=1): + def wrap(func): + return func + + return wrap if fn is None else fn + + _custom_op = custom_op_no_op + _register_fake = register_fake_no_op + logger = get_logger(__name__) # pylint: disable=invalid-name @@ -473,12 +494,11 @@ def _flex_attention_causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx): # ===== torch op registrations ===== # Registrations are required for fullgraph tracing compatibility - - -# TODO: library.custom_op and register_fake probably need version guards? # TODO: this is only required because the beta release FA3 does not have it. 
There is a PR adding # this but it was never merged: https://github.com/Dao-AILab/flash-attention/pull/1590 -@torch.library.custom_op("flash_attn_3::_flash_attn_forward", mutates_args=(), device_types="cuda") + + +@_custom_op("flash_attn_3::_flash_attn_forward", mutates_args=(), device_types="cuda") def _wrapped_flash_attn_3_original( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: @@ -487,7 +507,7 @@ def _wrapped_flash_attn_3_original( return out, lse -@torch.library.register_fake("flash_attn_3::_flash_attn_forward") +@_register_fake("flash_attn_3::_flash_attn_forward") def _(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: batch_size, seq_len, num_heads, head_dim = query.shape lse_shape = (batch_size, seq_len, num_heads) From a840c39ad8de04b242168e24c097371ba188f0e5 Mon Sep 17 00:00:00 2001 From: Aryan Date: Sat, 23 Aug 2025 22:18:55 +0530 Subject: [PATCH 12/74] [refactor] Make guiders return their inputs (#12213) * update * update * apply review suggestions * remove guider inputs * fix tests --- src/diffusers/guiders/adaptive_projected_guidance.py | 6 +++--- src/diffusers/guiders/auto_guidance.py | 6 +++--- src/diffusers/guiders/classifier_free_guidance.py | 6 +++--- .../guiders/classifier_free_zero_star_guidance.py | 6 +++--- src/diffusers/guiders/frequency_decoupled_guidance.py | 6 +++--- src/diffusers/guiders/guider_utils.py | 8 +++++++- src/diffusers/guiders/perturbed_attention_guidance.py | 6 +++--- src/diffusers/guiders/skip_layer_guidance.py | 6 +++--- src/diffusers/guiders/smoothed_energy_guidance.py | 6 +++--- .../guiders/tangential_classifier_free_guidance.py | 6 +++--- .../modular_pipelines/stable_diffusion_xl/denoise.py | 6 ++---- src/diffusers/modular_pipelines/wan/denoise.py | 3 +-- 12 files changed, 37 insertions(+), 34 deletions(-) diff --git a/src/diffusers/guiders/adaptive_projected_guidance.py b/src/diffusers/guiders/adaptive_projected_guidance.py index 81137db106..92b1fd5a1c 100644 --- a/src/diffusers/guiders/adaptive_projected_guidance.py +++ b/src/diffusers/guiders/adaptive_projected_guidance.py @@ -18,7 +18,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import torch from ..configuration_utils import register_to_config -from .guider_utils import BaseGuidance, rescale_noise_cfg +from .guider_utils import BaseGuidance, GuiderOutput, rescale_noise_cfg if TYPE_CHECKING: @@ -92,7 +92,7 @@ class AdaptiveProjectedGuidance(BaseGuidance): data_batches.append(data_batch) return data_batches - def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> GuiderOutput: pred = None if not self._is_apg_enabled(): @@ -111,7 +111,7 @@ class AdaptiveProjectedGuidance(BaseGuidance): if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale) - return pred, {} + return GuiderOutput(pred=pred, pred_cond=pred_cond, pred_uncond=pred_uncond) @property def is_conditional(self) -> bool: diff --git a/src/diffusers/guiders/auto_guidance.py b/src/diffusers/guiders/auto_guidance.py index e1642211d3..8f4d7b11c9 100644 --- a/src/diffusers/guiders/auto_guidance.py +++ b/src/diffusers/guiders/auto_guidance.py @@ -20,7 +20,7 @@ import torch from ..configuration_utils import register_to_config from ..hooks import HookRegistry, LayerSkipConfig from ..hooks.layer_skip import _apply_layer_skip_hook -from 
.guider_utils import BaseGuidance, rescale_noise_cfg +from .guider_utils import BaseGuidance, GuiderOutput, rescale_noise_cfg if TYPE_CHECKING: @@ -145,7 +145,7 @@ class AutoGuidance(BaseGuidance): data_batches.append(data_batch) return data_batches - def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> GuiderOutput: pred = None if not self._is_ag_enabled(): @@ -158,7 +158,7 @@ class AutoGuidance(BaseGuidance): if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale) - return pred, {} + return GuiderOutput(pred=pred, pred_cond=pred_cond, pred_uncond=pred_uncond) @property def is_conditional(self) -> bool: diff --git a/src/diffusers/guiders/classifier_free_guidance.py b/src/diffusers/guiders/classifier_free_guidance.py index 7e72b92fce..050590336f 100644 --- a/src/diffusers/guiders/classifier_free_guidance.py +++ b/src/diffusers/guiders/classifier_free_guidance.py @@ -18,7 +18,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import torch from ..configuration_utils import register_to_config -from .guider_utils import BaseGuidance, rescale_noise_cfg +from .guider_utils import BaseGuidance, GuiderOutput, rescale_noise_cfg if TYPE_CHECKING: @@ -96,7 +96,7 @@ class ClassifierFreeGuidance(BaseGuidance): data_batches.append(data_batch) return data_batches - def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> GuiderOutput: pred = None if not self._is_cfg_enabled(): @@ -109,7 +109,7 @@ class ClassifierFreeGuidance(BaseGuidance): if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale) - return pred, {} + return GuiderOutput(pred=pred, pred_cond=pred_cond, pred_uncond=pred_uncond) @property def is_conditional(self) -> bool: diff --git a/src/diffusers/guiders/classifier_free_zero_star_guidance.py b/src/diffusers/guiders/classifier_free_zero_star_guidance.py index 85d5cc62d4..b64e356331 100644 --- a/src/diffusers/guiders/classifier_free_zero_star_guidance.py +++ b/src/diffusers/guiders/classifier_free_zero_star_guidance.py @@ -18,7 +18,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import torch from ..configuration_utils import register_to_config -from .guider_utils import BaseGuidance, rescale_noise_cfg +from .guider_utils import BaseGuidance, GuiderOutput, rescale_noise_cfg if TYPE_CHECKING: @@ -89,7 +89,7 @@ class ClassifierFreeZeroStarGuidance(BaseGuidance): data_batches.append(data_batch) return data_batches - def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> GuiderOutput: pred = None if self._step < self.zero_init_steps: @@ -109,7 +109,7 @@ class ClassifierFreeZeroStarGuidance(BaseGuidance): if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale) - return pred, {} + return GuiderOutput(pred=pred, pred_cond=pred_cond, pred_uncond=pred_uncond) @property def is_conditional(self) -> bool: diff --git a/src/diffusers/guiders/frequency_decoupled_guidance.py b/src/diffusers/guiders/frequency_decoupled_guidance.py index 35bc99ac4d..2bf2f430b1 100644 --- a/src/diffusers/guiders/frequency_decoupled_guidance.py 
+++ b/src/diffusers/guiders/frequency_decoupled_guidance.py @@ -19,7 +19,7 @@ import torch from ..configuration_utils import register_to_config from ..utils import is_kornia_available -from .guider_utils import BaseGuidance, rescale_noise_cfg +from .guider_utils import BaseGuidance, GuiderOutput, rescale_noise_cfg if TYPE_CHECKING: @@ -230,7 +230,7 @@ class FrequencyDecoupledGuidance(BaseGuidance): data_batches.append(data_batch) return data_batches - def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> GuiderOutput: pred = None if not self._is_fdg_enabled(): @@ -277,7 +277,7 @@ class FrequencyDecoupledGuidance(BaseGuidance): if self.guidance_rescale_space == "data" and self.guidance_rescale[0] > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale[0]) - return pred, {} + return GuiderOutput(pred=pred, pred_cond=pred_cond, pred_uncond=pred_uncond) @property def is_conditional(self) -> bool: diff --git a/src/diffusers/guiders/guider_utils.py b/src/diffusers/guiders/guider_utils.py index 9dc83a7f1d..a6f2e76dc3 100644 --- a/src/diffusers/guiders/guider_utils.py +++ b/src/diffusers/guiders/guider_utils.py @@ -20,7 +20,7 @@ from huggingface_hub.utils import validate_hf_hub_args from typing_extensions import Self from ..configuration_utils import ConfigMixin -from ..utils import PushToHubMixin, get_logger +from ..utils import BaseOutput, PushToHubMixin, get_logger if TYPE_CHECKING: @@ -284,6 +284,12 @@ class BaseGuidance(ConfigMixin, PushToHubMixin): self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) +class GuiderOutput(BaseOutput): + pred: torch.Tensor + pred_cond: Optional[torch.Tensor] + pred_uncond: Optional[torch.Tensor] + + def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. 
Based on diff --git a/src/diffusers/guiders/perturbed_attention_guidance.py b/src/diffusers/guiders/perturbed_attention_guidance.py index 1b2256732f..e294e8d0db 100644 --- a/src/diffusers/guiders/perturbed_attention_guidance.py +++ b/src/diffusers/guiders/perturbed_attention_guidance.py @@ -21,7 +21,7 @@ from ..configuration_utils import register_to_config from ..hooks import HookRegistry, LayerSkipConfig from ..hooks.layer_skip import _apply_layer_skip_hook from ..utils import get_logger -from .guider_utils import BaseGuidance, rescale_noise_cfg +from .guider_utils import BaseGuidance, GuiderOutput, rescale_noise_cfg if TYPE_CHECKING: @@ -197,7 +197,7 @@ class PerturbedAttentionGuidance(BaseGuidance): pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None, pred_cond_skip: Optional[torch.Tensor] = None, - ) -> torch.Tensor: + ) -> GuiderOutput: pred = None if not self._is_cfg_enabled() and not self._is_slg_enabled(): @@ -219,7 +219,7 @@ class PerturbedAttentionGuidance(BaseGuidance): if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale) - return pred, {} + return GuiderOutput(pred=pred, pred_cond=pred_cond, pred_uncond=pred_uncond) @property # Copied from diffusers.guiders.skip_layer_guidance.SkipLayerGuidance.is_conditional diff --git a/src/diffusers/guiders/skip_layer_guidance.py b/src/diffusers/guiders/skip_layer_guidance.py index 68a657960a..3530df8b0a 100644 --- a/src/diffusers/guiders/skip_layer_guidance.py +++ b/src/diffusers/guiders/skip_layer_guidance.py @@ -20,7 +20,7 @@ import torch from ..configuration_utils import register_to_config from ..hooks import HookRegistry, LayerSkipConfig from ..hooks.layer_skip import _apply_layer_skip_hook -from .guider_utils import BaseGuidance, rescale_noise_cfg +from .guider_utils import BaseGuidance, GuiderOutput, rescale_noise_cfg if TYPE_CHECKING: @@ -192,7 +192,7 @@ class SkipLayerGuidance(BaseGuidance): pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None, pred_cond_skip: Optional[torch.Tensor] = None, - ) -> torch.Tensor: + ) -> GuiderOutput: pred = None if not self._is_cfg_enabled() and not self._is_slg_enabled(): @@ -214,7 +214,7 @@ class SkipLayerGuidance(BaseGuidance): if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale) - return pred, {} + return GuiderOutput(pred=pred, pred_cond=pred_cond, pred_uncond=pred_uncond) @property def is_conditional(self) -> bool: diff --git a/src/diffusers/guiders/smoothed_energy_guidance.py b/src/diffusers/guiders/smoothed_energy_guidance.py index d8e8a3cf2f..767d20b62f 100644 --- a/src/diffusers/guiders/smoothed_energy_guidance.py +++ b/src/diffusers/guiders/smoothed_energy_guidance.py @@ -20,7 +20,7 @@ import torch from ..configuration_utils import register_to_config from ..hooks import HookRegistry from ..hooks.smoothed_energy_guidance_utils import SmoothedEnergyGuidanceConfig, _apply_smoothed_energy_guidance_hook -from .guider_utils import BaseGuidance, rescale_noise_cfg +from .guider_utils import BaseGuidance, GuiderOutput, rescale_noise_cfg if TYPE_CHECKING: @@ -181,7 +181,7 @@ class SmoothedEnergyGuidance(BaseGuidance): pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None, pred_cond_seg: Optional[torch.Tensor] = None, - ) -> torch.Tensor: + ) -> GuiderOutput: pred = None if not self._is_cfg_enabled() and not self._is_seg_enabled(): @@ -203,7 +203,7 @@ class SmoothedEnergyGuidance(BaseGuidance): if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, 
self.guidance_rescale) - return pred, {} + return GuiderOutput(pred=pred, pred_cond=pred_cond, pred_uncond=pred_uncond) @property def is_conditional(self) -> bool: diff --git a/src/diffusers/guiders/tangential_classifier_free_guidance.py b/src/diffusers/guiders/tangential_classifier_free_guidance.py index b3187e5263..df1e69fe71 100644 --- a/src/diffusers/guiders/tangential_classifier_free_guidance.py +++ b/src/diffusers/guiders/tangential_classifier_free_guidance.py @@ -18,7 +18,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import torch from ..configuration_utils import register_to_config -from .guider_utils import BaseGuidance, rescale_noise_cfg +from .guider_utils import BaseGuidance, GuiderOutput, rescale_noise_cfg if TYPE_CHECKING: @@ -78,7 +78,7 @@ class TangentialClassifierFreeGuidance(BaseGuidance): data_batches.append(data_batch) return data_batches - def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> GuiderOutput: pred = None if not self._is_tcfg_enabled(): @@ -89,7 +89,7 @@ class TangentialClassifierFreeGuidance(BaseGuidance): if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale) - return pred, {} + return GuiderOutput(pred=pred, pred_cond=pred_cond, pred_uncond=pred_uncond) @property def is_conditional(self) -> bool: diff --git a/src/diffusers/modular_pipelines/stable_diffusion_xl/denoise.py b/src/diffusers/modular_pipelines/stable_diffusion_xl/denoise.py index 96df9711cc..34e07dff8a 100644 --- a/src/diffusers/modular_pipelines/stable_diffusion_xl/denoise.py +++ b/src/diffusers/modular_pipelines/stable_diffusion_xl/denoise.py @@ -238,7 +238,7 @@ class StableDiffusionXLLoopDenoiser(ModularPipelineBlocks): components.guider.cleanup_models(components.unet) # Perform guidance - block_state.noise_pred, block_state.scheduler_step_kwargs = components.guider(guider_state) + block_state.noise_pred = components.guider(guider_state)[0] return components, block_state @@ -433,7 +433,7 @@ class StableDiffusionXLControlNetLoopDenoiser(ModularPipelineBlocks): components.guider.cleanup_models(components.unet) # Perform guidance - block_state.noise_pred, block_state.scheduler_step_kwargs = components.guider(guider_state) + block_state.noise_pred = components.guider(guider_state)[0] return components, block_state @@ -492,7 +492,6 @@ class StableDiffusionXLLoopAfterDenoiser(ModularPipelineBlocks): t, block_state.latents, **block_state.extra_step_kwargs, - **block_state.scheduler_step_kwargs, return_dict=False, )[0] @@ -590,7 +589,6 @@ class StableDiffusionXLInpaintLoopAfterDenoiser(ModularPipelineBlocks): t, block_state.latents, **block_state.extra_step_kwargs, - **block_state.scheduler_step_kwargs, return_dict=False, )[0] diff --git a/src/diffusers/modular_pipelines/wan/denoise.py b/src/diffusers/modular_pipelines/wan/denoise.py index 9871d4ad61..34297bcfb5 100644 --- a/src/diffusers/modular_pipelines/wan/denoise.py +++ b/src/diffusers/modular_pipelines/wan/denoise.py @@ -127,7 +127,7 @@ class WanLoopDenoiser(ModularPipelineBlocks): components.guider.cleanup_models(components.transformer) # Perform guidance - block_state.noise_pred, block_state.scheduler_step_kwargs = components.guider(guider_state) + block_state.noise_pred = components.guider(guider_state)[0] return components, block_state @@ -171,7 +171,6 @@ class WanLoopAfterDenoiser(ModularPipelineBlocks): 
block_state.noise_pred.float(), t, block_state.latents.float(), - **block_state.scheduler_step_kwargs, return_dict=False, )[0] From 22b229ba66533fd3e6ce3b8568b5a5ee8ed207dc Mon Sep 17 00:00:00 2001 From: Sadhvi <41192585+akiseakusa@users.noreply.github.com> Date: Mon, 25 Aug 2025 07:28:21 +0530 Subject: [PATCH 13/74] added a fast test for Qwen-Image Controlnet Pipeline (#12226) * added test qwen image controlnet * Apply style fixes * added test qwenimage multicontrolnet * Apply style fixes --------- Co-authored-by: github-actions[bot] --- .../qwenimage/test_qwenimage_controlnet.py | 339 ++++++++++++++++++ 1 file changed, 339 insertions(+) create mode 100644 tests/pipelines/qwenimage/test_qwenimage_controlnet.py diff --git a/tests/pipelines/qwenimage/test_qwenimage_controlnet.py b/tests/pipelines/qwenimage/test_qwenimage_controlnet.py new file mode 100644 index 0000000000..c78e5cb233 --- /dev/null +++ b/tests/pipelines/qwenimage/test_qwenimage_controlnet.py @@ -0,0 +1,339 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer + +from diffusers import ( + AutoencoderKLQwenImage, + FlowMatchEulerDiscreteScheduler, + QwenImageControlNetModel, + QwenImageControlNetPipeline, + QwenImageMultiControlNetModel, + QwenImageTransformer2DModel, +) +from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from diffusers.utils.torch_utils import randn_tensor + +from ..pipeline_params import TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class QwenControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = QwenImageControlNetPipeline + params = (TEXT_TO_IMAGE_PARAMS | frozenset(["control_image", "controlnet_conditioning_scale"])) - { + "cross_attention_kwargs" + } + batch_params = frozenset(["prompt", "negative_prompt", "control_image"]) + image_params = frozenset(["control_image"]) + image_latents_params = frozenset(["latents"]) + + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "control_image", + "controlnet_conditioning_scale", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + supports_dduf = False + test_xformers_attention = True + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = QwenImageTransformer2DModel( + patch_size=2, + in_channels=16, + out_channels=4, + num_layers=2, + attention_head_dim=16, + num_attention_heads=3, + joint_attention_dim=16, + guidance_embeds=False, + axes_dims_rope=(8, 4, 4), + ) + + torch.manual_seed(0) + controlnet = QwenImageControlNetModel( + patch_size=2, + in_channels=16, + out_channels=4, + num_layers=2, + attention_head_dim=16, + num_attention_heads=3, + joint_attention_dim=16, + 
axes_dims_rope=(8, 4, 4), + ) + + torch.manual_seed(0) + z_dim = 4 + vae = AutoencoderKLQwenImage( + base_dim=z_dim * 6, + z_dim=z_dim, + dim_mult=[1, 2, 4], + num_res_blocks=1, + temperal_downsample=[False, True], + latents_mean=[0.0] * z_dim, + latents_std=[1.0] * z_dim, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + + torch.manual_seed(0) + config = Qwen2_5_VLConfig( + text_config={ + "hidden_size": 16, + "intermediate_size": 16, + "num_hidden_layers": 2, + "num_attention_heads": 2, + "num_key_value_heads": 2, + "rope_scaling": { + "mrope_section": [1, 1, 2], + "rope_type": "default", + "type": "default", + }, + "rope_theta": 1_000_000.0, + }, + vision_config={ + "depth": 2, + "hidden_size": 16, + "intermediate_size": 16, + "num_heads": 2, + "out_hidden_size": 16, + }, + hidden_size=16, + vocab_size=152064, + vision_end_token_id=151653, + vision_start_token_id=151652, + vision_token_id=151654, + ) + + text_encoder = Qwen2_5_VLForConditionalGeneration(config) + tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "controlnet": controlnet, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + control_image = randn_tensor( + (1, 3, 32, 32), + generator=generator, + device=torch.device(device), + dtype=torch.float32, + ) + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "true_cfg_scale": 1.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "control_image": control_image, + "controlnet_conditioning_scale": 0.5, + "output_type": "pt", + } + + return inputs + + def test_qwen_controlnet(self): + device = "cpu" + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + + # Expected slice from the generated image + expected_slice = torch.tensor( + [ + 0.4726, + 0.5549, + 0.6324, + 0.6548, + 0.4968, + 0.4639, + 0.4749, + 0.4898, + 0.4725, + 0.4645, + 0.4435, + 0.3339, + 0.3400, + 0.4630, + 0.3879, + 0.4406, + ] + ) + + generated_slice = generated_image.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_qwen_controlnet_multicondition(self): + device = "cpu" + components = self.get_dummy_components() + + components["controlnet"] = QwenImageMultiControlNetModel([components["controlnet"]]) + + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + control_image = inputs["control_image"] + inputs["control_image"] = [control_image, control_image] + inputs["controlnet_conditioning_scale"] = [0.5, 0.5] + + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + # Expected slice from the generated image + expected_slice = torch.tensor( + [ + 0.6239, + 0.6642, + 
0.5768, + 0.6039, + 0.5270, + 0.5070, + 0.5006, + 0.5271, + 0.4506, + 0.3085, + 0.3435, + 0.5152, + 0.5096, + 0.5422, + 0.4286, + 0.5752, + ] + ) + + generated_slice = generated_image.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + inputs["control_image"] = randn_tensor( + (1, 3, 128, 128), + generator=inputs["generator"], + device=torch.device(generator_device), + dtype=torch.float32, + ) + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + inputs["control_image"] = randn_tensor( + (1, 3, 128, 128), + generator=inputs["generator"], + device=torch.device(generator_device), + dtype=torch.float32, + ) + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) From 144e6e2540dd2cf5b0ba26438f4ff0da0ca2e659 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Mon, 25 Aug 2025 21:00:12 +0530 Subject: [PATCH 14/74] [docs] change wan2.1 -> wan (#12230) * change wan2.1 -> wan * up --- docs/source/en/api/pipelines/wan.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/api/pipelines/wan.md b/docs/source/en/api/pipelines/wan.md index b9c5990f24..3289a840e2 100644 --- a/docs/source/en/api/pipelines/wan.md +++ b/docs/source/en/api/pipelines/wan.md @@ -20,7 +20,7 @@ -# Wan2.1 +# Wan [Wan-2.1](https://huggingface.co/papers/2503.20314) by the Wan Team. 
@@ -42,7 +42,7 @@ The following Wan models are supported in Diffusers: - [Wan 2.2 TI2V 5B](https://huggingface.co/Wan-AI/Wan2.2-TI2V-5B-Diffusers) > [!TIP] -> Click on the Wan2.1 models in the right sidebar for more examples of video generation. +> Click on the Wan models in the right sidebar for more examples of video generation. ### Text-to-Video Generation From cf1ca728eabb8354ce5be57cf4d97d503a01dbb9 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Mon, 25 Aug 2025 21:12:06 +0530 Subject: [PATCH 15/74] fix title for compile + offload quantized models (#12233) * up * up * Apply suggestions from code review Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- docs/source/en/_toctree.yml | 2 +- docs/source/en/optimization/speed-memory-optims.md | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 42558b636c..fccec0a080 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -77,7 +77,7 @@ - local: optimization/memory title: Reduce memory usage - local: optimization/speed-memory-optims - title: Compile and offloading quantized models + title: Compiling and offloading quantized models - title: Community optimizations sections: - local: optimization/pruna diff --git a/docs/source/en/optimization/speed-memory-optims.md b/docs/source/en/optimization/speed-memory-optims.md index f43e60bc74..80c6c79a3c 100644 --- a/docs/source/en/optimization/speed-memory-optims.md +++ b/docs/source/en/optimization/speed-memory-optims.md @@ -10,7 +10,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Compile and offloading quantized models +# Compiling and offloading quantized models Optimizing models often involves trade-offs between [inference speed](./fp16) and [memory-usage](./memory). For instance, while [caching](./cache) can boost inference speed, it also increases memory consumption since it needs to store the outputs of intermediate attention layers. A more balanced optimization strategy combines quantizing a model, [torch.compile](./fp16#torchcompile) and various [offloading methods](./memory#offloading). @@ -28,7 +28,8 @@ The table below provides a comparison of optimization strategy combinations and | quantization | 32.602 | 14.9453 | | quantization, torch.compile | 25.847 | 14.9448 | | quantization, torch.compile, model CPU offloading | 32.312 | 12.2369 | -These results are benchmarked on Flux with a RTX 4090. The transformer and text_encoder components are quantized. Refer to the [benchmarking script](https://gist.github.com/sayakpaul/0db9d8eeeb3d2a0e5ed7cf0d9ca19b7d) if you're interested in evaluating your own model. + +These results are benchmarked on Flux with a RTX 4090. The transformer and text_encoder components are quantized. Refer to the benchmarking script if you're interested in evaluating your own model. This guide will show you how to compile and offload a quantized model with [bitsandbytes](../quantization/bitsandbytes#torchcompile). Make sure you are using [PyTorch nightly](https://pytorch.org/get-started/locally/) and the latest version of bitsandbytes. 
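To make the last row of the table above concrete (quantization, torch.compile, and model CPU offloading), here is a rough sketch applied to Flux. This is an illustrative example under assumptions, not the benchmarked script the guide refers to: the checkpoint name, the 4-bit NF4 settings, and quantizing only the transformer are choices made for the sketch.

```py
import torch
from diffusers import AutoModel, BitsAndBytesConfig, DiffusionPipeline

# Quantize the transformer with bitsandbytes 4-bit NF4 (assumed settings).
quant_config = BitsAndBytesConfig(
    load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
)
transformer = AutoModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)

pipeline = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)

# Offload idle components to the CPU, then compile the quantized transformer.
pipeline.enable_model_cpu_offload()
pipeline.transformer.compile()

image = pipeline(
    "a photo of an astronaut riding a horse", num_inference_steps=28
).images[0]
```

The combination trades a small amount of speed (see the table) for a much lower peak memory footprint, since only the active component resides on the GPU at any time.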
From 2c4ee10b7736ae52e4ae289489b8d19422280d37 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Mon, 25 Aug 2025 11:06:12 -0700 Subject: [PATCH 16/74] [docs] Diffusion pipeline (#12148) * init * refactor * refresh * fix? * fix? * fix * fix-copies * feedback * feedback * fix * feedback --- docs/source/en/_toctree.yml | 4 +- docs/source/en/using-diffusers/loading.md | 701 ++++++---------------- 2 files changed, 187 insertions(+), 518 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index fccec0a080..18adba9223 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -9,11 +9,11 @@ - local: stable_diffusion title: Basic performance -- title: DiffusionPipeline +- title: Pipelines isExpanded: false sections: - local: using-diffusers/loading - title: Load pipelines + title: DiffusionPipeline - local: tutorials/autopipeline title: AutoPipeline - local: using-diffusers/custom_pipeline_overview diff --git a/docs/source/en/using-diffusers/loading.md b/docs/source/en/using-diffusers/loading.md index 20f0cc51e0..f86ea104cf 100644 --- a/docs/source/en/using-diffusers/loading.md +++ b/docs/source/en/using-diffusers/loading.md @@ -10,116 +10,166 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Load pipelines - [[open-in-colab]] -Diffusion systems consist of multiple components like parameterized models and schedulers that interact in complex ways. That is why we designed the [`DiffusionPipeline`] to wrap the complexity of the entire diffusion system into an easy-to-use API. At the same time, the [`DiffusionPipeline`] is entirely customizable so you can modify each component to build a diffusion system for your use case. +# DiffusionPipeline -This guide will show you how to load: +Diffusion models consists of multiple components like UNets or diffusion transformers (DiTs), text encoders, variational autoencoders (VAEs), and schedulers. The [`DiffusionPipeline`] wraps all of these components into a single easy-to-use API without giving up the flexibility to modify it's components. -- pipelines from the Hub and locally -- different components into a pipeline -- multiple pipelines without increasing memory usage -- checkpoint variants such as different floating point types or non-exponential mean averaged (EMA) weights +This guide will show you how to load a [`DiffusionPipeline`]. -## Load a pipeline +## Loading a pipeline -> [!TIP] -> Skip to the [DiffusionPipeline explained](#diffusionpipeline-explained) section if you're interested in an explanation about how the [`DiffusionPipeline`] class works. +[`DiffusionPipeline`] is a base pipeline class that automatically selects and returns an instance of a model's pipeline subclass, like [`QwenImagePipeline`], by scanning the `model_index.json` file for the class name. -There are two ways to load a pipeline for a task: - -1. Load the generic [`DiffusionPipeline`] class and allow it to automatically detect the correct pipeline class from the checkpoint. -2. Load a specific pipeline class for a specific task. - - - - -The [`DiffusionPipeline`] class is a simple and generic way to load the latest trending diffusion model from the [Hub](https://huggingface.co/models?library=diffusers&sort=trending). 
It uses the [`~DiffusionPipeline.from_pretrained`] method to automatically detect the correct pipeline class for a task from the checkpoint, downloads and caches all the required configuration and weight files, and returns a pipeline ready for inference. - -```python -from diffusers import DiffusionPipeline - -pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) -``` - -This same checkpoint can also be used for an image-to-image task. The [`DiffusionPipeline`] class can handle any task as long as you provide the appropriate inputs. For example, for an image-to-image task, you need to pass an initial image to the pipeline. +Pass a model id to [`~DiffusionPipeline.from_pretrained`] to load a pipeline. ```py +import torch from diffusers import DiffusionPipeline -pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) +pipeline = DiffusionPipeline.from_pretrained( + "Qwen/Qwen-Image", torch_dtype=torch.bfloat16, device_map="cuda" +) +``` -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png") -prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" -image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=init_image).images[0] +Every model has a specific pipeline subclass that inherits from [`DiffusionPipeline`]. A subclass usually has a narrow focus and are task-specific. See the table below for an example. + +| pipeline subclass | task | +|---|---| +| [`QwenImagePipeline`] | text-to-image | +| [`QwenImageImg2ImgPipeline`] | image-to-image | +| [`QwenImageInpaintPipeline`] | inpaint | + +You could use the subclass directly by passing a model id to [`~QwenImagePipeline.from_pretrained`]. + +```py +import torch +from diffusers import QwenImagePipeline + +pipeline = QwenImagePipeline.from_pretrained( + "Qwen/Qwen-Image", torch_dtype=torch.bfloat16, device_map="cuda" +) +``` + +### Local pipelines + +Pipelines can also be run locally. Use [`~huggingface_hub.snapshot_download`] to download a model repository. + +```py +from huggingface_hub import snapshot_download + +snapshot_download(repo_id="Qwen/Qwen-Image") +``` + +The model is downloaded to your [cache](../installation#cache). Pass the folder path to [`~QwenImagePipeline.from_pretrained`] to load it. + +```py +import torch +from diffusers import QwenImagePipeline + +pipeline = QwenImagePipeline.from_pretrained( + "path/to/your/cache", torch_dtype=torch.bfloat16, device_map="cuda" +) +``` + +The [`~QwenImagePipeline.from_pretrained`] method won't download files from the Hub when it detects a local path. But this also means it won't download and cache any updates that have been made to the model either. + +## Pipeline data types + +Use the `torch_dtype` argument in [`~DiffusionPipeline.from_pretrained`] to load a model with a specific data type. This allows you to load different models in different precisions. For example, loading a large transformer model in half-precision reduces the memory required. + +Pass the data type for each model as a dictionary to `torch_dtype`. Use the `default` key to set the default data type. If a model isn't in the dictionary and `default` isn't provided, it is loaded in full precision (`torch.float32`). 
+ +```py +import torch +from diffusers import QwenImagePipeline + +pipeline = QwenImagePipeline.from_pretrained( + "Qwen/Qwen-Image", + torch_dtype={"transformer": torch.bfloat16, "default": torch.float16}, +) +print(pipeline.transformer.dtype, pipeline.vae.dtype) +``` + +You don't need to use a dictionary if you're loading all the models in the same data type. + +```py +import torch +from diffusers import QwenImagePipeline + +pipeline = QwenImagePipeline.from_pretrained( + "Qwen/Qwen-Image", torch_dtype=torch.bfloat16 +) +print(pipeline.transformer.dtype, pipeline.vae.dtype) +``` + +## Device placement + +The `device_map` argument determines individual model or pipeline placement on an accelerator like a GPU. It is especially helpful when there are multiple GPUs. + +Diffusers currently provides three options to `device_map`, `"cuda"`, `"balanced"` and `"auto"`. Refer to the table below to compare the three placement strategies. + +| parameter | description | +|---|---| +| `"cuda"` | places model or pipeline on CUDA device | +| `"balanced"` | evenly distributes model or pipeline on all GPUs | +| `"auto"` | distribute model from fastest device first to slowest | + +Use the `max_memory` argument in [`~DiffusionPipeline.from_pretrained`] to allocate a maximum amount of memory to use on each device. By default, Diffusers uses the maximum amount available. + + + + +```py +import torch +from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained( + "Qwen/Qwen-Image", + torch_dtype=torch.bfloat16, + device_map="cuda", +) ``` - - -Checkpoints can be loaded by their specific pipeline class if you already know it. For example, to load a Stable Diffusion model, use the [`StableDiffusionPipeline`] class. - -```python -from diffusers import StableDiffusionPipeline - -pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) -``` - -This same checkpoint may also be used for another task like image-to-image. To differentiate what task you want to use the checkpoint for, you have to use the corresponding task-specific pipeline class. For example, to use the same checkpoint for image-to-image, use the [`StableDiffusionImg2ImgPipeline`] class. + ```py -from diffusers import StableDiffusionImg2ImgPipeline +import torch +from diffusers import AutoModel -pipeline = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) +max_memory = {0: "16GB", 1: "16GB"} +transformer = AutoModel.from_pretrained( + "Qwen/Qwen-Image", + subfolder="transformer", + torch_dtype=torch.bfloat16 + device_map="cuda", + max_memory=max_memory +) ``` -Use the Space below to gauge a pipeline's memory requirements before you download and load it to see if it runs on your hardware. +The `hf_device_map` attribute allows you to access and view the `device_map`. -
- -
- - -### Specifying Component-Specific Data Types - -You can customize the data types for individual sub-models by passing a dictionary to the `torch_dtype` parameter. This allows you to load different components of a pipeline in different floating point precisions. For instance, if you want to load the transformer with `torch.bfloat16` and all other components with `torch.float16`, you can pass a dictionary mapping: - -```python -from diffusers import HunyuanVideoPipeline -import torch - -pipe = HunyuanVideoPipeline.from_pretrained( - "hunyuanvideo-community/HunyuanVideo", - torch_dtype={"transformer": torch.bfloat16, "default": torch.float16}, -) -print(pipe.transformer.dtype, pipe.vae.dtype) # (torch.bfloat16, torch.float16) +```py +print(pipeline.hf_device_map) +# {'unet': 1, 'vae': 1, 'safety_checker': 0, 'text_encoder': 0} ``` -If a component is not explicitly specified in the dictionary and no `default` is provided, it will be loaded with `torch.float32`. +Reset a pipeline's `device_map` with the [`~DiffusionPipeline.reset_device_map`] method. This is necessary if you want to use methods such as `.to()`, [`~DiffusionPipeline.enable_sequential_cpu_offload`], and [`~DiffusionPipeline.enable_model_cpu_offload`]. -### Parallel loading +```py +pipeline.reset_device_map() +``` + +## Parallel loading Large models are often [sharded](../training/distributed_inference#model-sharding) into smaller files so that they are easier to load. Diffusers supports loading shards in parallel to speed up the loading process. -Set the environment variables below to enable parallel loading. - -- Set `HF_ENABLE_PARALLEL_LOADING` to `"YES"` to enable parallel loading of shards. -- Set `HF_PARALLEL_LOADING_WORKERS` to configure the number of parallel threads to use when loading shards. More workers loads a model faster but uses more memory. +Set `HF_ENABLE_PARALLEL_LOADING` to `"YES"` to enable parallel loading of shards. The `device_map` argument should be set to `"cuda"` to pre-allocate a large chunk of memory based on the model size. This substantially reduces model load time because warming up the memory allocator now avoids many smaller calls to the allocator later. @@ -129,479 +179,98 @@ import torch from diffusers import DiffusionPipeline os.environ["HF_ENABLE_PARALLEL_LOADING"] = "YES" + pipeline = DiffusionPipeline.from_pretrained( - "Wan-AI/Wan2.2-I2V-A14B-Diffusers", - torch_dtype=torch.bfloat16, - device_map="cuda" + "Wan-AI/Wan2.2-I2V-A14B-Diffusers", torch_dtype=torch.bfloat16, device_map="cuda" ) ``` -### Local pipeline +## Replacing models in a pipeline -To load a pipeline locally, use [git-lfs](https://git-lfs.github.com/) to manually download a checkpoint to your local disk. +[`DiffusionPipeline`] is flexible and accommodates loading different models or schedulers. You can experiment with different schedulers to optimize for generation speed or quality, and you can replace models with more performant ones. -```bash -git-lfs install -git clone https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5 -``` - -This creates a local folder, ./stable-diffusion-v1-5, on your disk and you should pass its path to [`~DiffusionPipeline.from_pretrained`]. 
- -```python -from diffusers import DiffusionPipeline - -stable_diffusion = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True) -``` - -The [`~DiffusionPipeline.from_pretrained`] method won't download files from the Hub when it detects a local path, but this also means it won't download and cache the latest changes to a checkpoint. - -## Customize a pipeline - -You can customize a pipeline by loading different components into it. This is important because you can: - -- change to a scheduler with faster generation speed or higher generation quality depending on your needs (call the `scheduler.compatibles` method on your pipeline to see compatible schedulers) -- change a default pipeline component to a newer and better performing one - -For example, let's customize the default [stabilityai/stable-diffusion-xl-base-1.0](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0) checkpoint with: - -- The [`HeunDiscreteScheduler`] to generate higher quality images at the expense of slower generation speed. You must pass the `subfolder="scheduler"` parameter in [`~HeunDiscreteScheduler.from_pretrained`] to load the scheduler configuration into the correct [subfolder](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/scheduler) of the pipeline repository. -- A more stable VAE that runs in fp16. +The example below swaps the default scheduler to generate higher quality images and a more stable VAE version. Pass the `subfolder` argument in [`~HeunDiscreteScheduler.from_pretrained`] to load the scheduler to the correct subfolder. ```py -from diffusers import StableDiffusionXLPipeline, HeunDiscreteScheduler, AutoencoderKL import torch +from diffusers import DiffusionPipeline, HeunDiscreteScheduler, AutoModel -scheduler = HeunDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler") -vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True) -``` +scheduler = HeunDiscreteScheduler.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler" +) +vae = AutoModel.from_pretrained( + "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16 +) -Now pass the new scheduler and VAE to the [`StableDiffusionXLPipeline`]. - -```py -pipeline = StableDiffusionXLPipeline.from_pretrained( +pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", scheduler=scheduler, vae=vae, torch_dtype=torch.float16, - variant="fp16", - use_safetensors=True -).to("cuda") + device_map="cuda" +) ``` -## Reuse a pipeline +## Reusing models in multiple pipelines -When you load multiple pipelines that share the same model components, it makes sense to reuse the shared components instead of reloading everything into memory again, especially if your hardware is memory-constrained. For example: +When working with multiple pipelines that use the same model, the [`~DiffusionPipeline.from_pipe`] method enables reusing a model instead of reloading it each time. This allows you to use multiple pipelines without increasing memory usage. -1. You generated an image with the [`StableDiffusionPipeline`] but you want to improve its quality with the [`StableDiffusionSAGPipeline`]. Both of these pipelines share the same pretrained model, so it'd be a waste of memory to load the same model twice. -2. 
You want to add a model component, like a [`MotionAdapter`](../api/pipelines/animatediff#animatediffpipeline), to [`AnimateDiffPipeline`] which was instantiated from an existing [`StableDiffusionPipeline`]. Again, both pipelines share the same pretrained model, so it'd be a waste of memory to load an entirely new pipeline again. +Memory usage is determined by the pipeline with the highest memory requirement regardless of the number of pipelines. -With the [`DiffusionPipeline.from_pipe`] API, you can switch between multiple pipelines to take advantage of their different features without increasing memory-usage. It is similar to turning on and off a feature in your pipeline. - -> [!TIP] -> To switch between tasks (rather than features), use the [`~DiffusionPipeline.from_pipe`] method with the [AutoPipeline](../api/pipelines/auto_pipeline) class, which automatically identifies the pipeline class based on the task (learn more in the [AutoPipeline](../tutorials/autopipeline) tutorial). - -Let's start with a [`StableDiffusionPipeline`] and then reuse the loaded model components to create a [`StableDiffusionSAGPipeline`] to increase generation quality. You'll use the [`StableDiffusionPipeline`] with an [IP-Adapter](./ip_adapter) to generate a bear eating pizza. - -```python -from diffusers import DiffusionPipeline, StableDiffusionSAGPipeline -import torch -import gc -from diffusers.utils import load_image -from accelerate.utils import compute_module_sizes - -image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png") - -pipe_sd = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V6.0_B1_noVAE", torch_dtype=torch.float16) -pipe_sd.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") -pipe_sd.set_ip_adapter_scale(0.6) -pipe_sd.to("cuda") - -generator = torch.Generator(device="cpu").manual_seed(33) -out_sd = pipe_sd( - prompt="bear eats pizza", - negative_prompt="wrong white balance, dark, sketches,worst quality,low quality", - ip_adapter_image=image, - num_inference_steps=50, - generator=generator, -).images[0] -out_sd -``` - -
- -For reference, you can check how much memory this process consumed. - -```python -def bytes_to_giga_bytes(bytes): - return bytes / 1024 / 1024 / 1024 -print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB") -"Max memory allocated: 4.406213283538818 GB" -``` - -Now, reuse the same pipeline components from [`StableDiffusionPipeline`] in [`StableDiffusionSAGPipeline`] with the [`~DiffusionPipeline.from_pipe`] method. +The example below loads a pipeline and then loads a second pipeline with [`~DiffusionPipeline.from_pipe`] to use [perturbed-attention guidance (PAG)](../api/pipelines/pag) to improve generation quality. > [!WARNING] -> Some pipeline methods may not function properly on new pipelines created with [`~DiffusionPipeline.from_pipe`]. For instance, the [`~DiffusionPipeline.enable_model_cpu_offload`] method installs hooks on the model components based on a unique offloading sequence for each pipeline. If the models are executed in a different order in the new pipeline, the CPU offloading may not work correctly. -> -> To ensure everything works as expected, we recommend re-applying a pipeline method on a new pipeline created with [`~DiffusionPipeline.from_pipe`]. +> Use [`AutoPipelineForText2Image`] because [`DiffusionPipeline`] doesn't support PAG. Refer to the [AutoPipeline](../tutorials/autopipeline) docs to learn more. -```python -pipe_sag = StableDiffusionSAGPipeline.from_pipe( - pipe_sd +```py +import torch +from diffusers import AutoPipelineForText2Image + +pipeline_sdxl = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, device_map="cuda" ) - -generator = torch.Generator(device="cpu").manual_seed(33) -out_sag = pipe_sag( - prompt="bear eats pizza", - negative_prompt="wrong white balance, dark, sketches,worst quality,low quality", - ip_adapter_image=image, - num_inference_steps=50, - generator=generator, - guidance_scale=1.0, - sag_scale=0.75 -).images[0] -out_sag +prompt = """ +cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California +highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain +""" +image = pipeline_sdxl(prompt).images[0] +print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") +# Max memory reserved: 10.47 GB ``` -
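One practical note on the memory readings above: PyTorch's peak-memory counters are cumulative for the whole process. A minimal sketch, assuming a CUDA device and reusing the `pipeline_sdxl` and `prompt` defined in the snippet above, that resets the counter so each pipeline's peak can be measured in isolation:

```py
import torch

# Peak-memory statistics accumulate across the process, so reset them before
# running the pipeline you want to measure on its own.
torch.cuda.reset_peak_memory_stats()

image = pipeline_sdxl(prompt).images[0]
print(f"Max memory allocated: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
```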
- -If you check the memory usage, you'll see it remains the same as before because [`StableDiffusionPipeline`] and [`StableDiffusionSAGPipeline`] are sharing the same pipeline components. This allows you to use them interchangeably without any additional memory overhead. +Set `enable_pag=True` in the second pipeline to enable PAG. The second pipeline uses the same amount of memory because it shares model weights with the first one. ```py -print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB") -"Max memory allocated: 4.406213283538818 GB" +pipeline = AutoPipelineForText2Image.from_pipe( + pipeline_sdxl, enable_pag=True +) +prompt = """ +cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California +highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain +""" +image = pipeline(prompt).images[0] +print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") +# Max memory reserved: 10.47 GB ``` -Let's animate the image with the [`AnimateDiffPipeline`] and also add a [`MotionAdapter`] module to the pipeline. For the [`AnimateDiffPipeline`], you need to unload the IP-Adapter first and reload it *after* you've created your new pipeline (this only applies to the [`AnimateDiffPipeline`]). +> [!WARNING] +> Pipelines created by [`~DiffusionPipeline.from_pipe`] share the same models and *state*. Modifying the state of a model in one pipeline affects all the other pipelines that share the same model. -```py -from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler -from diffusers.utils import export_to_gif - -pipe_sag.unload_ip_adapter() -adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16) - -pipe_animate = AnimateDiffPipeline.from_pipe(pipe_sd, motion_adapter=adapter) -pipe_animate.scheduler = DDIMScheduler.from_config(pipe_animate.scheduler.config, beta_schedule="linear") -# load IP-Adapter and LoRA weights again -pipe_animate.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") -pipe_animate.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out") -pipe_animate.to("cuda") - -generator = torch.Generator(device="cpu").manual_seed(33) -pipe_animate.set_adapters("zoom-out", adapter_weights=0.75) -out = pipe_animate( - prompt="bear eats pizza", - num_frames=16, - num_inference_steps=50, - ip_adapter_image=image, - generator=generator, -).frames[0] -export_to_gif(out, "out_animate.gif") -``` - -
- -The [`AnimateDiffPipeline`] is more memory-intensive and consumes 15GB of memory (see the [Memory-usage of from_pipe](#memory-usage-of-from_pipe) section to learn what this means for your memory-usage). - -```py -print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB") -"Max memory allocated: 15.178664207458496 GB" -``` - -### Modify from_pipe components - -Pipelines loaded with [`~DiffusionPipeline.from_pipe`] can be customized with different model components or methods. However, whenever you modify the *state* of the model components, it affects all the other pipelines that share the same components. For example, if you call [`~diffusers.loaders.IPAdapterMixin.unload_ip_adapter`] on the [`StableDiffusionSAGPipeline`], you won't be able to use IP-Adapter with the [`StableDiffusionPipeline`] because it's been removed from their shared components. - -```py -pipe.sag_unload_ip_adapter() - -generator = torch.Generator(device="cpu").manual_seed(33) -out_sd = pipe_sd( - prompt="bear eats pizza", - negative_prompt="wrong white balance, dark, sketches,worst quality,low quality", - ip_adapter_image=image, - num_inference_steps=50, - generator=generator, -).images[0] -"AttributeError: 'NoneType' object has no attribute 'image_projection_layers'" -``` - -### Memory usage of from_pipe - -The memory requirement of loading multiple pipelines with [`~DiffusionPipeline.from_pipe`] is determined by the pipeline with the highest memory-usage regardless of the number of pipelines you create. - -| Pipeline | Memory usage (GB) | -|---|---| -| StableDiffusionPipeline | 4.400 | -| StableDiffusionSAGPipeline | 4.400 | -| AnimateDiffPipeline | 15.178 | - -The [`AnimateDiffPipeline`] has the highest memory requirement, so the *total memory-usage* is based only on the [`AnimateDiffPipeline`]. Your memory-usage will not increase if you create additional pipelines as long as their memory requirements doesn't exceed that of the [`AnimateDiffPipeline`]. Each pipeline can be used interchangeably without any additional memory overhead. +Some methods may not work correctly on pipelines created with [`~DiffusionPipeline.from_pipe`]. For example, [`~DiffusionPipeline.enable_model_cpu_offload`] relies on a unique model execution order, which may differ in the new pipeline. To ensure proper functionality, reapply these methods on the new pipeline. ## Safety checker -Diffusers implements a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) for Stable Diffusion models which can generate harmful content. The safety checker screens the generated output against known hardcoded not-safe-for-work (NSFW) content. If for whatever reason you'd like to disable the safety checker, pass `safety_checker=None` to the [`~DiffusionPipeline.from_pretrained`] method. +Diffusers provides a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) for older Stable Diffusion models to prevent generating harmful content. It screens the generated output against a set of hardcoded harmful concepts. -```python +If you want to disable the safety checker, pass `safety_checker=None` in [`~DiffusionPipeline.from_pretrained`] as shown below. 
+ +```py from diffusers import DiffusionPipeline -pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, use_safetensors=True) +pipeline = DiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None +) """ You have disabled the safety checker for by passing `safety_checker=None`. Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling it only for use cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 . """ -``` - -## Checkpoint variants - -A checkpoint variant is usually a checkpoint whose weights are: - -- Stored in a different floating point type, such as [torch.float16](https://pytorch.org/docs/stable/tensors.html#data-types), because it only requires half the bandwidth and storage to download. You can't use this variant if you're continuing training or using a CPU. -- Non-exponential mean averaged (EMA) weights which shouldn't be used for inference. You should use this variant to continue finetuning a model. - -> [!TIP] -> When the checkpoints have identical model structures, but they were trained on different datasets and with a different training setup, they should be stored in separate repositories. For example, [stabilityai/stable-diffusion-2](https://hf.co/stabilityai/stable-diffusion-2) and [stabilityai/stable-diffusion-2-1](https://hf.co/stabilityai/stable-diffusion-2-1) are stored in separate repositories. - -Otherwise, a variant is **identical** to the original checkpoint. They have exactly the same serialization format (like [safetensors](./using_safetensors)), model structure, and their weights have identical tensor shapes. - -| **checkpoint type** | **weight name** | **argument for loading weights** | -|---------------------|---------------------------------------------|----------------------------------| -| original | diffusion_pytorch_model.safetensors | | -| floating point | diffusion_pytorch_model.fp16.safetensors | `variant`, `torch_dtype` | -| non-EMA | diffusion_pytorch_model.non_ema.safetensors | `variant` | - -There are two important arguments for loading variants: - -- `torch_dtype` specifies the floating point precision of the loaded checkpoint. For example, if you want to save bandwidth by loading a fp16 variant, you should set `variant="fp16"` and `torch_dtype=torch.float16` to *convert the weights* to fp16. Otherwise, the fp16 weights are converted to the default fp32 precision. - - If you only set `torch_dtype=torch.float16`, the default fp32 weights are downloaded first and then converted to fp16. - -- `variant` specifies which files should be loaded from the repository. For example, if you want to load a non-EMA variant of a UNet from [stable-diffusion-v1-5/stable-diffusion-v1-5](https://hf.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main/unet), set `variant="non_ema"` to download the `non_ema` file. 
- - - - -```py -from diffusers import DiffusionPipeline -import torch - -pipeline = DiffusionPipeline.from_pretrained( - "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16, use_safetensors=True -) -``` - - - - -```py -pipeline = DiffusionPipeline.from_pretrained( - "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="non_ema", use_safetensors=True -) -``` - - - - -Use the `variant` parameter in the [`DiffusionPipeline.save_pretrained`] method to save a checkpoint as a different floating point type or as a non-EMA variant. You should try save a variant to the same folder as the original checkpoint, so you have the option of loading both from the same folder. - - - - -```python -from diffusers import DiffusionPipeline - -pipeline.save_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="fp16") -``` - - - - -```py -pipeline.save_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="non_ema") -``` - - - - -If you don't save the variant to an existing folder, you must specify the `variant` argument otherwise it'll throw an `Exception` because it can't find the original checkpoint. - -```python -# 👎 this won't work -pipeline = DiffusionPipeline.from_pretrained( - "./stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True -) -# 👍 this works -pipeline = DiffusionPipeline.from_pretrained( - "./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16, use_safetensors=True -) -``` - -## DiffusionPipeline explained - -As a class method, [`DiffusionPipeline.from_pretrained`] is responsible for two things: - -- Download the latest version of the folder structure required for inference and cache it. If the latest folder structure is available in the local cache, [`DiffusionPipeline.from_pretrained`] reuses the cache and won't redownload the files. -- Load the cached weights into the correct pipeline [class](../api/pipelines/overview#diffusers-summary) - retrieved from the `model_index.json` file - and return an instance of it. - -The pipelines' underlying folder structure corresponds directly with their class instances. For example, the [`StableDiffusionPipeline`] corresponds to the folder structure in [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5). - -```python -from diffusers import DiffusionPipeline - -repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" -pipeline = DiffusionPipeline.from_pretrained(repo_id, use_safetensors=True) -print(pipeline) -``` - -You'll see pipeline is an instance of [`StableDiffusionPipeline`], which consists of seven components: - -- `"feature_extractor"`: a [`~transformers.CLIPImageProcessor`] from 🤗 Transformers. -- `"safety_checker"`: a [component](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) for screening against harmful content. -- `"scheduler"`: an instance of [`PNDMScheduler`]. -- `"text_encoder"`: a [`~transformers.CLIPTextModel`] from 🤗 Transformers. -- `"tokenizer"`: a [`~transformers.CLIPTokenizer`] from 🤗 Transformers. -- `"unet"`: an instance of [`UNet2DConditionModel`]. -- `"vae"`: an instance of [`AutoencoderKL`]. 
- -```json -StableDiffusionPipeline { - "feature_extractor": [ - "transformers", - "CLIPImageProcessor" - ], - "safety_checker": [ - "stable_diffusion", - "StableDiffusionSafetyChecker" - ], - "scheduler": [ - "diffusers", - "PNDMScheduler" - ], - "text_encoder": [ - "transformers", - "CLIPTextModel" - ], - "tokenizer": [ - "transformers", - "CLIPTokenizer" - ], - "unet": [ - "diffusers", - "UNet2DConditionModel" - ], - "vae": [ - "diffusers", - "AutoencoderKL" - ] -} -``` - -Compare the components of the pipeline instance to the [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main) folder structure, and you'll see there is a separate folder for each of the components in the repository: - -``` -. -├── feature_extractor -│   └── preprocessor_config.json -├── model_index.json -├── safety_checker -│   ├── config.json -| ├── model.fp16.safetensors -│ ├── model.safetensors -│ ├── pytorch_model.bin -| └── pytorch_model.fp16.bin -├── scheduler -│   └── scheduler_config.json -├── text_encoder -│   ├── config.json -| ├── model.fp16.safetensors -│ ├── model.safetensors -│ |── pytorch_model.bin -| └── pytorch_model.fp16.bin -├── tokenizer -│   ├── merges.txt -│   ├── special_tokens_map.json -│   ├── tokenizer_config.json -│   └── vocab.json -├── unet -│   ├── config.json -│   ├── diffusion_pytorch_model.bin -| |── diffusion_pytorch_model.fp16.bin -│ |── diffusion_pytorch_model.f16.safetensors -│ |── diffusion_pytorch_model.non_ema.bin -│ |── diffusion_pytorch_model.non_ema.safetensors -│ └── diffusion_pytorch_model.safetensors -|── vae -. ├── config.json -. ├── diffusion_pytorch_model.bin - ├── diffusion_pytorch_model.fp16.bin - ├── diffusion_pytorch_model.fp16.safetensors - └── diffusion_pytorch_model.safetensors -``` - -You can access each of the components of the pipeline as an attribute to view its configuration: - -```py -pipeline.tokenizer -CLIPTokenizer( - name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer", - vocab_size=49408, - model_max_length=77, - is_fast=False, - padding_side="right", - truncation_side="right", - special_tokens={ - "bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), - "eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), - "unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), - "pad_token": "<|endoftext|>", - }, - clean_up_tokenization_spaces=True -) -``` - -Every pipeline expects a [`model_index.json`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/model_index.json) file that tells the [`DiffusionPipeline`]: - -- which pipeline class to load from `_class_name` -- which version of 🧨 Diffusers was used to create the model in `_diffusers_version` -- what components from which library are stored in the subfolders (`name` corresponds to the component and subfolder name, `library` corresponds to the name of the library to load the class from, and `class` corresponds to the class name) - -```json -{ - "_class_name": "StableDiffusionPipeline", - "_diffusers_version": "0.6.0", - "feature_extractor": [ - "transformers", - "CLIPImageProcessor" - ], - "safety_checker": [ - "stable_diffusion", - "StableDiffusionSafetyChecker" - ], - "scheduler": [ - "diffusers", - "PNDMScheduler" - ], - "text_encoder": [ - "transformers", - 
"CLIPTextModel" - ], - "tokenizer": [ - "transformers", - "CLIPTokenizer" - ], - "unet": [ - "diffusers", - "UNet2DConditionModel" - ], - "vae": [ - "diffusers", - "AutoencoderKL" - ] -} -``` +``` \ No newline at end of file From afc9721898d28346f38f7325fd439bee35e9983a Mon Sep 17 00:00:00 2001 From: Cyan <77715972+chencyan21@users.noreply.github.com> Date: Tue, 26 Aug 2025 02:19:55 +0800 Subject: [PATCH 17/74] Fix typo in LoRA (#12228) Fix formatting in using_peft_for_inference.md --- docs/source/en/tutorials/using_peft_for_inference.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/tutorials/using_peft_for_inference.md b/docs/source/en/tutorials/using_peft_for_inference.md index 5cd47f8674..7bdd2a1ee9 100644 --- a/docs/source/en/tutorials/using_peft_for_inference.md +++ b/docs/source/en/tutorials/using_peft_for_inference.md @@ -94,7 +94,7 @@ pipeline = AutoPipelineForText2Image.from_pretrained( pipeline.unet.load_lora_adapter( "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", - adapter_name="cinematic" + adapter_name="cinematic", prefix="unet" ) # use cnmt in the prompt to trigger the LoRA @@ -688,4 +688,4 @@ Browse the [LoRA Studio](https://lorastudio.co/models) for different LoRAs to us You can find additional LoRAs in the [FLUX LoRA the Explorer](https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer) and [LoRA the Explorer](https://huggingface.co/spaces/multimodalart/LoraTheExplorer) Spaces. -Check out the [Fast LoRA inference for Flux with Diffusers and PEFT](https://huggingface.co/blog/lora-fast) blog post to learn how to optimize LoRA inference with methods like FlashAttention-3 and fp8 quantization. \ No newline at end of file +Check out the [Fast LoRA inference for Flux with Diffusers and PEFT](https://huggingface.co/blog/lora-fast) blog post to learn how to optimize LoRA inference with methods like FlashAttention-3 and fp8 quantization. 
From 8f8888a76ec16ea7afc2cc8e9be04bd8cccf6b37 Mon Sep 17 00:00:00 2001 From: Manith Ratnayake <144333591+Manith-Ratnayake@users.noreply.github.com> Date: Tue, 26 Aug 2025 00:05:48 +0530 Subject: [PATCH 18/74] [docs] typo : corrected 'compile regions' to 'compile_regions' (#12199) [docs] typo: corrected 'compile regions' to 'compile_regions' --- docs/source/en/optimization/fp16.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/optimization/fp16.md b/docs/source/en/optimization/fp16.md index e32cbec917..76d749ecf3 100644 --- a/docs/source/en/optimization/fp16.md +++ b/docs/source/en/optimization/fp16.md @@ -209,7 +209,7 @@ There is also a [compile_regions](https://github.com/huggingface/accelerate/blob # pip install -U accelerate import torch from diffusers import StableDiffusionXLPipeline -from accelerate.utils import compile regions +from accelerate.utils import compile_regions pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 From 0e46c55931928163dfb7cb0ba990c3696fb5d4eb Mon Sep 17 00:00:00 2001 From: Meta <30329784+MetaInsight7@users.noreply.github.com> Date: Tue, 26 Aug 2025 02:35:56 +0800 Subject: [PATCH 19/74] Update README.md (#12193) --- examples/dreambooth/README_qwen.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/dreambooth/README_qwen.md b/examples/dreambooth/README_qwen.md index 0f0b640c8b..68c546a25d 100644 --- a/examples/dreambooth/README_qwen.md +++ b/examples/dreambooth/README_qwen.md @@ -77,7 +77,7 @@ export MODEL_NAME="Qwen/Qwen-Image" export INSTANCE_DIR="dog" export OUTPUT_DIR="trained-qwenimage-lora" -accelerate launch train_dreambooth_lora_qwenimage.py \ +accelerate launch train_dreambooth_lora_qwen_image.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ From 0d1c5b0c3efd89c5b677d232358228e7f2792927 Mon Sep 17 00:00:00 2001 From: sqt <93052530+sqt24@users.noreply.github.com> Date: Tue, 26 Aug 2025 03:47:52 +0800 Subject: [PATCH 20/74] Fix typo: 'will ge generated' -> 'will be generated' (#12231) --- examples/community/composable_stable_diffusion.py | 2 +- examples/community/imagic_stable_diffusion.py | 2 +- examples/community/img2img_inpainting.py | 2 +- examples/community/interpolate_stable_diffusion.py | 2 +- examples/community/lpw_stable_diffusion.py | 4 ++-- examples/community/lpw_stable_diffusion_onnx.py | 4 ++-- examples/community/lpw_stable_diffusion_xl.py | 2 +- examples/community/multilingual_stable_diffusion.py | 2 +- examples/community/pipeline_controlnet_xl_kolors.py | 2 +- examples/community/pipeline_controlnet_xl_kolors_img2img.py | 2 +- examples/community/pipeline_controlnet_xl_kolors_inpaint.py | 2 +- examples/community/pipeline_demofusion_sdxl.py | 2 +- .../community/pipeline_faithdiff_stable_diffusion_xl.py | 2 +- examples/community/pipeline_flux_differential_img2img.py | 4 ++-- examples/community/pipeline_flux_kontext_multiple_images.py | 2 +- examples/community/pipeline_flux_rf_inversion.py | 2 +- examples/community/pipeline_flux_semantic_guidance.py | 2 +- examples/community/pipeline_flux_with_cfg.py | 2 +- examples/community/pipeline_kolors_differential_img2img.py | 2 +- examples/community/pipeline_kolors_inpainting.py | 2 +- examples/community/pipeline_prompt2prompt.py | 2 +- examples/community/pipeline_sdxl_style_aligned.py | 2 +- .../pipeline_stable_diffusion_3_differential_img2img.py | 2 +- .../pipeline_stable_diffusion_3_instruct_pix2pix.py | 
2 +- .../pipeline_stable_diffusion_xl_attentive_eraser.py | 2 +- .../pipeline_stable_diffusion_xl_controlnet_adapter.py | 2 +- ...peline_stable_diffusion_xl_controlnet_adapter_inpaint.py | 2 +- .../pipeline_stable_diffusion_xl_differential_img2img.py | 2 +- examples/community/pipeline_stable_diffusion_xl_ipex.py | 2 +- examples/community/pipeline_stg_cogvideox.py | 2 +- examples/community/pipeline_stg_ltx.py | 2 +- examples/community/pipeline_stg_ltx_image2video.py | 2 +- examples/community/pipeline_stg_mochi.py | 2 +- examples/community/pipeline_zero1to3.py | 2 +- examples/community/rerender_a_video.py | 2 +- examples/community/run_onnx_controlnet.py | 2 +- examples/community/run_tensorrt_controlnet.py | 2 +- examples/community/sd_text2img_k_diffusion.py | 2 +- examples/community/seed_resize_stable_diffusion.py | 2 +- examples/community/stable_diffusion_comparison.py | 2 +- examples/community/stable_diffusion_controlnet_img2img.py | 2 +- examples/community/stable_diffusion_controlnet_inpaint.py | 2 +- .../stable_diffusion_controlnet_inpaint_img2img.py | 2 +- examples/community/stable_diffusion_controlnet_reference.py | 2 +- examples/community/stable_diffusion_ipex.py | 2 +- examples/community/stable_diffusion_reference.py | 2 +- examples/community/stable_diffusion_repaint.py | 2 +- examples/community/stable_diffusion_xl_reference.py | 2 +- examples/community/text_inpainting.py | 2 +- examples/community/tiled_upscaling.py | 2 +- examples/community/wildcard_stable_diffusion.py | 2 +- .../pixart/pipeline_pixart_alpha_controlnet.py | 2 +- examples/research_projects/rdm/pipeline_rdm.py | 2 +- src/diffusers/pipelines/allegro/pipeline_allegro.py | 2 +- .../pipelines/animatediff/pipeline_animatediff_sdxl.py | 2 +- src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py | 2 +- .../pipelines/blip_diffusion/pipeline_blip_diffusion.py | 2 +- src/diffusers/pipelines/bria/pipeline_bria.py | 2 +- src/diffusers/pipelines/chroma/pipeline_chroma.py | 2 +- src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py | 2 +- src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py | 2 +- .../pipelines/cogvideo/pipeline_cogvideox_fun_control.py | 2 +- .../pipelines/cogvideo/pipeline_cogvideox_image2video.py | 2 +- .../pipelines/cogvideo/pipeline_cogvideox_video2video.py | 2 +- src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py | 2 +- src/diffusers/pipelines/cogview4/pipeline_cogview4.py | 2 +- .../pipelines/cogview4/pipeline_cogview4_control.py | 2 +- src/diffusers/pipelines/consisid/pipeline_consisid.py | 2 +- .../controlnet/pipeline_controlnet_blip_diffusion.py | 2 +- .../controlnet/pipeline_controlnet_inpaint_sd_xl.py | 2 +- .../controlnet/pipeline_controlnet_sd_xl_img2img.py | 2 +- .../controlnet/pipeline_controlnet_union_inpaint_sd_xl.py | 2 +- .../controlnet/pipeline_controlnet_union_sd_xl_img2img.py | 2 +- .../pipeline_stable_diffusion_3_controlnet.py | 2 +- .../pipeline_stable_diffusion_3_controlnet_inpainting.py | 2 +- .../pipeline_stable_diffusion_pix2pix_zero.py | 4 ++-- src/diffusers/pipelines/flux/pipeline_flux_control.py | 2 +- .../pipelines/flux/pipeline_flux_control_img2img.py | 2 +- .../pipelines/flux/pipeline_flux_control_inpaint.py | 4 ++-- src/diffusers/pipelines/flux/pipeline_flux_controlnet.py | 2 +- src/diffusers/pipelines/flux/pipeline_flux_fill.py | 4 ++-- src/diffusers/pipelines/flux/pipeline_flux_img2img.py | 2 +- src/diffusers/pipelines/flux/pipeline_flux_inpaint.py | 4 ++-- src/diffusers/pipelines/flux/pipeline_flux_kontext.py | 2 +- 
.../pipelines/flux/pipeline_flux_kontext_inpaint.py | 2 +- .../pipelines/hidream_image/pipeline_hidream_image.py | 2 +- src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py | 2 +- .../pipelines/kandinsky/pipeline_kandinsky_combined.py | 6 +++--- .../pipelines/kandinsky/pipeline_kandinsky_inpaint.py | 2 +- .../pipelines/kandinsky/pipeline_kandinsky_prior.py | 4 ++-- .../pipelines/kandinsky2_2/pipeline_kandinsky2_2.py | 2 +- .../kandinsky2_2/pipeline_kandinsky2_2_combined.py | 6 +++--- .../kandinsky2_2/pipeline_kandinsky2_2_controlnet.py | 2 +- .../kandinsky2_2/pipeline_kandinsky2_2_inpainting.py | 2 +- .../pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py | 4 ++-- .../kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py | 2 +- src/diffusers/pipelines/kolors/pipeline_kolors.py | 2 +- src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py | 2 +- src/diffusers/pipelines/latte/pipeline_latte.py | 2 +- src/diffusers/pipelines/ltx/pipeline_ltx.py | 2 +- src/diffusers/pipelines/ltx/pipeline_ltx_condition.py | 2 +- src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py | 2 +- src/diffusers/pipelines/lumina/pipeline_lumina.py | 2 +- src/diffusers/pipelines/lumina2/pipeline_lumina2.py | 2 +- src/diffusers/pipelines/mochi/pipeline_mochi.py | 2 +- src/diffusers/pipelines/omnigen/pipeline_omnigen.py | 2 +- .../pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py | 2 +- src/diffusers/pipelines/pag/pipeline_pag_kolors.py | 2 +- src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py | 2 +- src/diffusers/pipelines/pag/pipeline_pag_sana.py | 2 +- src/diffusers/pipelines/pag/pipeline_pag_sd_3.py | 2 +- src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py | 2 +- src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py | 2 +- src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py | 2 +- src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py | 2 +- .../pipelines/pixart_alpha/pipeline_pixart_alpha.py | 2 +- .../pipelines/pixart_alpha/pipeline_pixart_sigma.py | 2 +- .../pipelines/qwenimage/pipeline_qwenimage_inpaint.py | 2 +- src/diffusers/pipelines/sana/pipeline_sana.py | 2 +- src/diffusers/pipelines/sana/pipeline_sana_controlnet.py | 2 +- src/diffusers/pipelines/sana/pipeline_sana_sprint.py | 2 +- .../pipelines/sana/pipeline_sana_sprint_img2img.py | 2 +- .../pipelines/stable_cascade/pipeline_stable_cascade.py | 2 +- .../stable_cascade/pipeline_stable_cascade_combined.py | 2 +- .../stable_cascade/pipeline_stable_cascade_prior.py | 2 +- .../stable_diffusion/pipeline_onnx_stable_diffusion.py | 2 +- .../pipeline_onnx_stable_diffusion_inpaint.py | 2 +- .../pipeline_onnx_stable_diffusion_upscale.py | 2 +- .../stable_diffusion_3/pipeline_stable_diffusion_3.py | 2 +- .../pipeline_stable_diffusion_3_img2img.py | 2 +- .../pipeline_stable_diffusion_3_inpaint.py | 4 ++-- .../pipeline_stable_diffusion_k_diffusion.py | 2 +- .../pipeline_stable_diffusion_xl_k_diffusion.py | 2 +- .../stable_diffusion_xl/pipeline_stable_diffusion_xl.py | 2 +- .../pipeline_stable_diffusion_xl_img2img.py | 2 +- .../pipeline_stable_diffusion_xl_inpaint.py | 2 +- .../pipeline_stable_diffusion_xl_instruct_pix2pix.py | 2 +- .../t2i_adapter/pipeline_stable_diffusion_adapter.py | 2 +- .../t2i_adapter/pipeline_stable_diffusion_xl_adapter.py | 2 +- .../pipeline_text_to_video_zero_sdxl.py | 2 +- .../pipelines/visualcloze/pipeline_visualcloze_combined.py | 2 +- .../visualcloze/pipeline_visualcloze_generation.py | 2 +- src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py | 2 +- .../pipelines/wuerstchen/pipeline_wuerstchen_combined.py | 
2 +- .../pipelines/wuerstchen/pipeline_wuerstchen_prior.py | 2 +- 145 files changed, 159 insertions(+), 159 deletions(-) diff --git a/examples/community/composable_stable_diffusion.py b/examples/community/composable_stable_diffusion.py index ec653bcdb4..a7c540ceb9 100644 --- a/examples/community/composable_stable_diffusion.py +++ b/examples/community/composable_stable_diffusion.py @@ -398,7 +398,7 @@ class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin) latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/imagic_stable_diffusion.py b/examples/community/imagic_stable_diffusion.py index a2561c9198..091d0fbf8d 100644 --- a/examples/community/imagic_stable_diffusion.py +++ b/examples/community/imagic_stable_diffusion.py @@ -147,7 +147,7 @@ class ImagicStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`. diff --git a/examples/community/img2img_inpainting.py b/examples/community/img2img_inpainting.py index 7b9bd043d0..499230b1e2 100644 --- a/examples/community/img2img_inpainting.py +++ b/examples/community/img2img_inpainting.py @@ -197,7 +197,7 @@ class ImageToImageInpaintingPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/interpolate_stable_diffusion.py b/examples/community/interpolate_stable_diffusion.py index 460bb464f3..5b96c14d63 100644 --- a/examples/community/interpolate_stable_diffusion.py +++ b/examples/community/interpolate_stable_diffusion.py @@ -173,7 +173,7 @@ class StableDiffusionWalkPipeline(DiffusionPipeline, StableDiffusionMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/lpw_stable_diffusion.py b/examples/community/lpw_stable_diffusion.py index ccb17a51e6..cb017c0bbe 100644 --- a/examples/community/lpw_stable_diffusion.py +++ b/examples/community/lpw_stable_diffusion.py @@ -888,7 +888,7 @@ class StableDiffusionLongPromptWeightingPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. @@ -1131,7 +1131,7 @@ class StableDiffusionLongPromptWeightingPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/lpw_stable_diffusion_onnx.py b/examples/community/lpw_stable_diffusion_onnx.py index ab1462b81b..92effc1933 100644 --- a/examples/community/lpw_stable_diffusion_onnx.py +++ b/examples/community/lpw_stable_diffusion_onnx.py @@ -721,7 +721,7 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): @@ -918,7 +918,7 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. 
max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): diff --git a/examples/community/lpw_stable_diffusion_xl.py b/examples/community/lpw_stable_diffusion_xl.py index ea67738ab7..272c5d5652 100644 --- a/examples/community/lpw_stable_diffusion_xl.py +++ b/examples/community/lpw_stable_diffusion_xl.py @@ -1519,7 +1519,7 @@ class SDXLLongPromptWeightingPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. prompt_embeds (`torch.Tensor`, *optional*): diff --git a/examples/community/multilingual_stable_diffusion.py b/examples/community/multilingual_stable_diffusion.py index 5e7453ed12..afef4e9e97 100644 --- a/examples/community/multilingual_stable_diffusion.py +++ b/examples/community/multilingual_stable_diffusion.py @@ -187,7 +187,7 @@ class MultilingualStableDiffusion(DiffusionPipeline, StableDiffusionMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/pipeline_controlnet_xl_kolors.py b/examples/community/pipeline_controlnet_xl_kolors.py index af5586990e..dc90aacdbc 100644 --- a/examples/community/pipeline_controlnet_xl_kolors.py +++ b/examples/community/pipeline_controlnet_xl_kolors.py @@ -888,7 +888,7 @@ class KolorsControlNetPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_controlnet_xl_kolors_img2img.py b/examples/community/pipeline_controlnet_xl_kolors_img2img.py index c0831945ed..189d031214 100644 --- a/examples/community/pipeline_controlnet_xl_kolors_img2img.py +++ b/examples/community/pipeline_controlnet_xl_kolors_img2img.py @@ -1066,7 +1066,7 @@ class KolorsControlNetImg2ImgPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_controlnet_xl_kolors_inpaint.py b/examples/community/pipeline_controlnet_xl_kolors_inpaint.py index db15d99ac3..4b6123cc1f 100644 --- a/examples/community/pipeline_controlnet_xl_kolors_inpaint.py +++ b/examples/community/pipeline_controlnet_xl_kolors_inpaint.py @@ -1298,7 +1298,7 @@ class KolorsControlNetInpaintPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/pipeline_demofusion_sdxl.py b/examples/community/pipeline_demofusion_sdxl.py index c9b57a6ece..119b39cefe 100644 --- a/examples/community/pipeline_demofusion_sdxl.py +++ b/examples/community/pipeline_demofusion_sdxl.py @@ -724,7 +724,7 @@ class DemoFusionSDXLPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_faithdiff_stable_diffusion_xl.py b/examples/community/pipeline_faithdiff_stable_diffusion_xl.py index 43ef55d32c..aa95d2ec71 100644 --- a/examples/community/pipeline_faithdiff_stable_diffusion_xl.py +++ b/examples/community/pipeline_faithdiff_stable_diffusion_xl.py @@ -1906,7 +1906,7 @@ class FaithDiffStableDiffusionXLPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/examples/community/pipeline_flux_differential_img2img.py b/examples/community/pipeline_flux_differential_img2img.py index 7d6358cb32..3677e73136 100644 --- a/examples/community/pipeline_flux_differential_img2img.py +++ b/examples/community/pipeline_flux_differential_img2img.py @@ -730,7 +730,7 @@ class FluxDifferentialImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin): 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -769,7 +769,7 @@ class FluxDifferentialImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_flux_kontext_multiple_images.py b/examples/community/pipeline_flux_kontext_multiple_images.py index ef0c643a40..7e4a9ed0fa 100644 --- a/examples/community/pipeline_flux_kontext_multiple_images.py +++ b/examples/community/pipeline_flux_kontext_multiple_images.py @@ -885,7 +885,7 @@ class FluxKontextPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_flux_rf_inversion.py b/examples/community/pipeline_flux_rf_inversion.py index 631d04b762..8f8b4817ac 100644 --- a/examples/community/pipeline_flux_rf_inversion.py +++ b/examples/community/pipeline_flux_rf_inversion.py @@ -711,7 +711,7 @@ class RFInversionFluxPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_flux_semantic_guidance.py b/examples/community/pipeline_flux_semantic_guidance.py index 93bcd3af75..b3d2b3a4b4 100644 --- a/examples/community/pipeline_flux_semantic_guidance.py +++ b/examples/community/pipeline_flux_semantic_guidance.py @@ -853,7 +853,7 @@ class FluxSemanticGuidancePipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_flux_with_cfg.py b/examples/community/pipeline_flux_with_cfg.py index 1b8dc9ecb8..3916aff257 100644 --- a/examples/community/pipeline_flux_with_cfg.py +++ b/examples/community/pipeline_flux_with_cfg.py @@ -639,7 +639,7 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_kolors_differential_img2img.py b/examples/community/pipeline_kolors_differential_img2img.py index 9491447409..d299c83981 100644 --- a/examples/community/pipeline_kolors_differential_img2img.py +++ b/examples/community/pipeline_kolors_differential_img2img.py @@ -904,7 +904,7 @@ class KolorsDifferentialImg2ImgPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_kolors_inpainting.py b/examples/community/pipeline_kolors_inpainting.py index cce9f10ded..3cab8ecac0 100644 --- a/examples/community/pipeline_kolors_inpainting.py +++ b/examples/community/pipeline_kolors_inpainting.py @@ -1246,7 +1246,7 @@ class KolorsInpaintPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/pipeline_prompt2prompt.py b/examples/community/pipeline_prompt2prompt.py index 065edc0cfb..8d94dc9248 100644 --- a/examples/community/pipeline_prompt2prompt.py +++ b/examples/community/pipeline_prompt2prompt.py @@ -611,7 +611,7 @@ class Prompt2PromptPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/pipeline_sdxl_style_aligned.py b/examples/community/pipeline_sdxl_style_aligned.py index ea168036c1..10438af365 100644 --- a/examples/community/pipeline_sdxl_style_aligned.py +++ b/examples/community/pipeline_sdxl_style_aligned.py @@ -1480,7 +1480,7 @@ class StyleAlignedSDXLPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stable_diffusion_3_differential_img2img.py b/examples/community/pipeline_stable_diffusion_3_differential_img2img.py index 693485d175..643386232b 100644 --- a/examples/community/pipeline_stable_diffusion_3_differential_img2img.py +++ b/examples/community/pipeline_stable_diffusion_3_differential_img2img.py @@ -748,7 +748,7 @@ class StableDiffusion3DifferentialImg2ImgPipeline(DiffusionPipeline): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py b/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py index 6923db23a6..d9cee800e8 100644 --- a/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py +++ b/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py @@ -945,7 +945,7 @@ class StableDiffusion3InstructPix2PixPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py b/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py index ab8064c6e3..a881814c2a 100644 --- a/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py +++ b/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py @@ -1786,7 +1786,7 @@ class StableDiffusionXL_AE_Pipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py index ccf1098c61..564a19e923 100644 --- a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py +++ b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py @@ -973,7 +973,7 @@ class StableDiffusionXLControlNetAdapterPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py index 38db19148d..c73433b20f 100644 --- a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py +++ b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py @@ -1329,7 +1329,7 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py b/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py index b9f00cb82d..89388e10cb 100644 --- a/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py +++ b/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py @@ -1053,7 +1053,7 @@ class StableDiffusionXLDifferentialImg2ImgPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stable_diffusion_xl_ipex.py b/examples/community/pipeline_stable_diffusion_xl_ipex.py index eda6089f59..aa2b24f396 100644 --- a/examples/community/pipeline_stable_diffusion_xl_ipex.py +++ b/examples/community/pipeline_stable_diffusion_xl_ipex.py @@ -832,7 +832,7 @@ class StableDiffusionXLPipelineIpex( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/examples/community/pipeline_stg_cogvideox.py b/examples/community/pipeline_stg_cogvideox.py index 1c98ae0f6d..bdb6aecc30 100644 --- a/examples/community/pipeline_stg_cogvideox.py +++ b/examples/community/pipeline_stg_cogvideox.py @@ -632,7 +632,7 @@ class CogVideoXSTGPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stg_ltx.py b/examples/community/pipeline_stg_ltx.py index f7ccf99e96..70069a33f5 100644 --- a/examples/community/pipeline_stg_ltx.py +++ b/examples/community/pipeline_stg_ltx.py @@ -620,7 +620,7 @@ class LTXSTGPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderM latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stg_ltx_image2video.py b/examples/community/pipeline_stg_ltx_image2video.py index 3b3d233380..c32805e141 100644 --- a/examples/community/pipeline_stg_ltx_image2video.py +++ b/examples/community/pipeline_stg_ltx_image2video.py @@ -682,7 +682,7 @@ class LTXImageToVideoSTGPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVide latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stg_mochi.py b/examples/community/pipeline_stg_mochi.py index b6ab1b192c..dbe5d2525a 100644 --- a/examples/community/pipeline_stg_mochi.py +++ b/examples/community/pipeline_stg_mochi.py @@ -603,7 +603,7 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. 
+ tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_zero1to3.py b/examples/community/pipeline_zero1to3.py index 0db543b169..9e29566978 100644 --- a/examples/community/pipeline_zero1to3.py +++ b/examples/community/pipeline_zero1to3.py @@ -657,7 +657,7 @@ class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/rerender_a_video.py b/examples/community/rerender_a_video.py index 133c232943..78a15a03b0 100644 --- a/examples/community/rerender_a_video.py +++ b/examples/community/rerender_a_video.py @@ -656,7 +656,7 @@ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/run_onnx_controlnet.py b/examples/community/run_onnx_controlnet.py index 2221fc09db..f0ab2a2b96 100644 --- a/examples/community/run_onnx_controlnet.py +++ b/examples/community/run_onnx_controlnet.py @@ -591,7 +591,7 @@ class OnnxStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/examples/community/run_tensorrt_controlnet.py b/examples/community/run_tensorrt_controlnet.py index b9e71724c0..e4f1abc83b 100644 --- a/examples/community/run_tensorrt_controlnet.py +++ b/examples/community/run_tensorrt_controlnet.py @@ -695,7 +695,7 @@ class TensorRTStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/sd_text2img_k_diffusion.py b/examples/community/sd_text2img_k_diffusion.py index ab6cf2d9cd..4d5cea497f 100755 --- a/examples/community/sd_text2img_k_diffusion.py +++ b/examples/community/sd_text2img_k_diffusion.py @@ -326,7 +326,7 @@ class StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/seed_resize_stable_diffusion.py b/examples/community/seed_resize_stable_diffusion.py index 3c823012c1..eafe7572aa 100644 --- a/examples/community/seed_resize_stable_diffusion.py +++ b/examples/community/seed_resize_stable_diffusion.py @@ -122,7 +122,7 @@ class SeedResizeStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin) latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/stable_diffusion_comparison.py b/examples/community/stable_diffusion_comparison.py index 36e7dba2de..22f3b3e0c3 100644 --- a/examples/community/stable_diffusion_comparison.py +++ b/examples/community/stable_diffusion_comparison.py @@ -279,7 +279,7 @@ class StableDiffusionComparisonPipeline(DiffusionPipeline, StableDiffusionMixin) latents (`torch.Tensor`, optional): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. 
+ tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, optional, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/stable_diffusion_controlnet_img2img.py b/examples/community/stable_diffusion_controlnet_img2img.py index 877464454a..6d8038cfd4 100644 --- a/examples/community/stable_diffusion_controlnet_img2img.py +++ b/examples/community/stable_diffusion_controlnet_img2img.py @@ -670,7 +670,7 @@ class StableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline, StableDiffusio latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_controlnet_inpaint.py b/examples/community/stable_diffusion_controlnet_inpaint.py index 175c47d015..fe7b808b6b 100644 --- a/examples/community/stable_diffusion_controlnet_inpaint.py +++ b/examples/community/stable_diffusion_controlnet_inpaint.py @@ -810,7 +810,7 @@ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline, StableDiffusio latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_controlnet_inpaint_img2img.py b/examples/community/stable_diffusion_controlnet_inpaint_img2img.py index 51e7ac38dd..2b5dc77fe5 100644 --- a/examples/community/stable_diffusion_controlnet_inpaint_img2img.py +++ b/examples/community/stable_diffusion_controlnet_inpaint_img2img.py @@ -804,7 +804,7 @@ class StableDiffusionControlNetInpaintImg2ImgPipeline(DiffusionPipeline, StableD latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/examples/community/stable_diffusion_controlnet_reference.py b/examples/community/stable_diffusion_controlnet_reference.py index aa9ab1b242..e5dd249e04 100644 --- a/examples/community/stable_diffusion_controlnet_reference.py +++ b/examples/community/stable_diffusion_controlnet_reference.py @@ -179,7 +179,7 @@ class StableDiffusionControlNetReferencePipeline(StableDiffusionControlNetPipeli latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_ipex.py b/examples/community/stable_diffusion_ipex.py index 18d5e8feaa..7d1cd4f5d0 100644 --- a/examples/community/stable_diffusion_ipex.py +++ b/examples/community/stable_diffusion_ipex.py @@ -615,7 +615,7 @@ class StableDiffusionIPEXPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_reference.py b/examples/community/stable_diffusion_reference.py index 69fa0722cf..6f7dce9823 100644 --- a/examples/community/stable_diffusion_reference.py +++ b/examples/community/stable_diffusion_reference.py @@ -885,7 +885,7 @@ class StableDiffusionReferencePipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_repaint.py b/examples/community/stable_diffusion_repaint.py index 9f6172f3b8..94b9f8b01b 100644 --- a/examples/community/stable_diffusion_repaint.py +++ b/examples/community/stable_diffusion_repaint.py @@ -678,7 +678,7 @@ class StableDiffusionRepaintPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. 
+ tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_xl_reference.py b/examples/community/stable_diffusion_xl_reference.py index 11926a5d9a..eb05557496 100644 --- a/examples/community/stable_diffusion_xl_reference.py +++ b/examples/community/stable_diffusion_xl_reference.py @@ -380,7 +380,7 @@ class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/text_inpainting.py b/examples/community/text_inpainting.py index 2908388029..f262cf2cac 100644 --- a/examples/community/text_inpainting.py +++ b/examples/community/text_inpainting.py @@ -180,7 +180,7 @@ class TextInpainting(DiffusionPipeline, StableDiffusionMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/tiled_upscaling.py b/examples/community/tiled_upscaling.py index 56eb3e89b5..7a5e77155c 100644 --- a/examples/community/tiled_upscaling.py +++ b/examples/community/tiled_upscaling.py @@ -231,7 +231,7 @@ class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. tile_size (`int`, *optional*): The size of the tiles. Too big can result in an OOM-error. tile_border (`int`, *optional*): diff --git a/examples/community/wildcard_stable_diffusion.py b/examples/community/wildcard_stable_diffusion.py index c750610ca3..d40221e5b1 100644 --- a/examples/community/wildcard_stable_diffusion.py +++ b/examples/community/wildcard_stable_diffusion.py @@ -209,7 +209,7 @@ class WildcardStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py b/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py index 148b2e7f31..89228983d4 100644 --- a/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py +++ b/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py @@ -860,7 +860,7 @@ class PixArtAlphaControlnetPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/research_projects/rdm/pipeline_rdm.py b/examples/research_projects/rdm/pipeline_rdm.py index 7e2095b724..9b696874c5 100644 --- a/examples/research_projects/rdm/pipeline_rdm.py +++ b/examples/research_projects/rdm/pipeline_rdm.py @@ -202,7 +202,7 @@ class RDMPipeline(DiffusionPipeline, StableDiffusionMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/allegro/pipeline_allegro.py b/src/diffusers/pipelines/allegro/pipeline_allegro.py index 0993c8b912..2c9548706e 100644 --- a/src/diffusers/pipelines/allegro/pipeline_allegro.py +++ b/src/diffusers/pipelines/allegro/pipeline_allegro.py @@ -760,7 +760,7 @@ class AllegroPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): generation. Can be used to tweak the same generation with different prompts. If not provided, a latents Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py index 260669ddaf..56d3190275 100644 --- a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +++ b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py @@ -971,7 +971,7 @@ class AnimateDiffSDXLPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py b/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py index 7ff9925c45..6251ca4435 100644 --- a/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py +++ b/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py @@ -497,7 +497,7 @@ class AuraFlowPipeline(DiffusionPipeline, AuraFlowLoraLoaderMixin): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py b/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py index 439dc511a0..8cd463c970 100644 --- a/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +++ b/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py @@ -228,7 +228,7 @@ class BlipDiffusionPipeline(DeprecatedPipelineMixin, DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by random sampling. + tensor will be generated by random sampling. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. diff --git a/src/diffusers/pipelines/bria/pipeline_bria.py b/src/diffusers/pipelines/bria/pipeline_bria.py index 39ed484793..ebddfb0c0e 100644 --- a/src/diffusers/pipelines/bria/pipeline_bria.py +++ b/src/diffusers/pipelines/bria/pipeline_bria.py @@ -506,7 +506,7 @@ class BriaPipeline(DiffusionPipeline): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 3a34ec2a42..a3dd1422b8 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -676,7 +676,7 @@ class ChromaPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py b/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py index e169db4a4d..233f4c43a1 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py @@ -744,7 +744,7 @@ class ChromaImg2ImgPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py index 3c5994172c..4ac33b24bb 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py @@ -571,7 +571,7 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py index cf6ccebc47..c1335839f8 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py @@ -616,7 +616,7 @@ class CogVideoXFunControlPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. control_video_latents (`torch.Tensor`, *optional*): Pre-generated control latents, sampled from a Gaussian distribution, to be used as inputs for controlled video generation. If not provided, `control_video` must be provided. diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py index d1f02ca9c9..225240927f 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py @@ -671,7 +671,7 @@ class CogVideoXImageToVideoPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin) latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py index 230c8ca296..897dc6d1b7 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py @@ -641,7 +641,7 @@ class CogVideoXVideoToVideoPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin) latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py b/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py index f2f852c213..304a5c5ad0 100644 --- a/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py +++ b/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py @@ -466,7 +466,7 @@ class CogView3PlusPipeline(DiffusionPipeline): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogview4/pipeline_cogview4.py b/src/diffusers/pipelines/cogview4/pipeline_cogview4.py index d8374b694f..22510f5d9d 100644 --- a/src/diffusers/pipelines/cogview4/pipeline_cogview4.py +++ b/src/diffusers/pipelines/cogview4/pipeline_cogview4.py @@ -466,7 +466,7 @@ class CogView4Pipeline(DiffusionPipeline, CogView4LoraLoaderMixin): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py b/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py index ac8d786f04..e26b7ba415 100644 --- a/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py +++ b/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py @@ -499,7 +499,7 @@ class CogView4ControlPipeline(DiffusionPipeline): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/consisid/pipeline_consisid.py b/src/diffusers/pipelines/consisid/pipeline_consisid.py index 644bd811f6..3e6c149d7f 100644 --- a/src/diffusers/pipelines/consisid/pipeline_consisid.py +++ b/src/diffusers/pipelines/consisid/pipeline_consisid.py @@ -733,7 +733,7 @@ class ConsisIDPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py index 598e3b5b6d..c2ae408778 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py @@ -279,7 +279,7 @@ class BlipDiffusionControlNetPipeline(DeprecatedPipelineMixin, DiffusionPipeline latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by random sampling. + tensor will be generated by random sampling. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py index 4aa2a62a53..397ab15715 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py @@ -1326,7 +1326,7 @@ class StableDiffusionXLControlNetInpaintPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py index 526e1ffcb2..4d4845c5a0 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py @@ -1197,7 +1197,7 @@ class StableDiffusionXLControlNetImg2ImgPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py index 7fa59395a8..fb58b22211 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py @@ -1310,7 +1310,7 @@ class StableDiffusionXLControlNetUnionInpaintPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py index 65e2fe6617..8fedb6d860 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py @@ -1185,7 +1185,7 @@ class StableDiffusionXLControlNetUnionImg2ImgPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py index e31e3a0178..c763411ab5 100644 --- a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +++ b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py @@ -918,7 +918,7 @@ class StableDiffusion3ControlNetPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py index 000e080d3a..c33cf979c6 100644 --- a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +++ b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py @@ -973,7 +973,7 @@ class StableDiffusion3ControlNetInpaintingPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py index f9034a5844..d000d87e6a 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -880,7 +880,7 @@ class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline, StableDiffusionMixin latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. @@ -1151,7 +1151,7 @@ class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline, StableDiffusionMixin latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control.py b/src/diffusers/pipelines/flux/pipeline_flux_control.py index 51d6ecbe31..cc9ebb4754 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control.py @@ -674,7 +674,7 @@ class FluxControlPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py b/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py index c61d46daef..262345c75a 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py @@ -712,7 +712,7 @@ class FluxControlImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSin latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py index 3de636361b..5acc5080f5 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py @@ -838,7 +838,7 @@ class FluxControlInpaintPipeline( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -870,7 +870,7 @@ class FluxControlInpaintPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py index a39b9c9ce2..507ec68734 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py @@ -764,7 +764,7 @@ class FluxControlNetPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleF latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_fill.py b/src/diffusers/pipelines/flux/pipeline_flux_fill.py index d50db407a8..956f6fb106 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_fill.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_fill.py @@ -775,7 +775,7 @@ class FluxFillPipeline( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -807,7 +807,7 @@ class FluxFillPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_img2img.py b/src/diffusers/pipelines/flux/pipeline_flux_img2img.py index 08e2f12778..4a9f2bad6a 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_img2img.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_img2img.py @@ -787,7 +787,7 @@ class FluxImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py index 0494146693..3bfe82cf43 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py @@ -834,7 +834,7 @@ class FluxInpaintPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FluxIPAdapterM 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -873,7 +873,7 @@ class FluxInpaintPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FluxIPAdapterM latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_kontext.py b/src/diffusers/pipelines/flux/pipeline_flux_kontext.py index ce2941f3dd..87011299c4 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_kontext.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_kontext.py @@ -808,7 +808,7 @@ class FluxKontextPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py index 56a5e934a4..3cdb8caea2 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py @@ -1029,7 +1029,7 @@ class FluxKontextInpaintPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py b/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py index 695f54f3d9..bf36ca2fa3 100644 --- a/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py +++ b/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py @@ -789,7 +789,7 @@ class HiDreamImagePipeline(DiffusionPipeline, HiDreamImageLoraLoaderMixin): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py index 89fea89337..92f612f541 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py @@ -291,7 +291,7 @@ class KandinskyPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py index 90d4042ae2..7286bcbee1 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -271,7 +271,7 @@ class KandinskyCombinedPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -502,7 +502,7 @@ class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. 
+ tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -742,7 +742,7 @@ class KandinskyInpaintCombinedPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py index 5645d2a56e..cde0b8fd0a 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -469,7 +469,7 @@ class KandinskyInpaintPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py index 8781d706ed..10ea8005c9 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py @@ -212,7 +212,7 @@ class KandinskyPriorPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. negative_prior_prompt (`str`, *optional*): The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). @@ -437,7 +437,7 @@ class KandinskyPriorPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. 
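The prior-pipeline docstrings above refer to `guidance_scale` as the `w` of equation 2 in the classifier-free guidance paper. As a quick illustration only (a sketch, not code from the patch; the tensor names are made up), `w` blends the unconditional and text-conditioned predictions like this:

```python
# Standard classifier-free guidance combination that `guidance_scale` (w) controls.
import torch

def apply_cfg(
    noise_pred_uncond: torch.Tensor, noise_pred_text: torch.Tensor, guidance_scale: float
) -> torch.Tensor:
    # w = 1.0 disables guidance; larger w pushes the result toward the text condition.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
```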
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py index 3ecc0ebd5b..429253e998 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py @@ -175,7 +175,7 @@ class KandinskyV22Pipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index e0b88b41e8..fc2083247b 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -262,7 +262,7 @@ class KandinskyV22CombinedPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -512,7 +512,7 @@ class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -749,7 +749,7 @@ class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). 
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py index b9f98f5458..c5faae8279 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py @@ -211,7 +211,7 @@ class KandinskyV22ControlnetPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py index 22171849bb..a61673293e 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -356,7 +356,7 @@ class KandinskyV22InpaintPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py index 68954c2dc8..0e7e16f9dd 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py @@ -171,7 +171,7 @@ class KandinskyV22PriorPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. negative_prior_prompt (`str`, *optional*): The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). @@ -412,7 +412,7 @@ class KandinskyV22PriorPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. 
guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py index 13ea2ad6af..1a7198b968 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py @@ -195,7 +195,7 @@ class KandinskyV22PriorEmb2EmbPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. negative_prior_prompt (`str`, *optional*): The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). diff --git a/src/diffusers/pipelines/kolors/pipeline_kolors.py b/src/diffusers/pipelines/kolors/pipeline_kolors.py index 1fa9f6ce1d..948f73ed91 100644 --- a/src/diffusers/pipelines/kolors/pipeline_kolors.py +++ b/src/diffusers/pipelines/kolors/pipeline_kolors.py @@ -749,7 +749,7 @@ class KolorsPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffusionLor latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py b/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py index e3cf4f2276..67d49b9a8c 100644 --- a/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py +++ b/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py @@ -900,7 +900,7 @@ class KolorsImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffu latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/latte/pipeline_latte.py b/src/diffusers/pipelines/latte/pipeline_latte.py index 0e60d5c7ac..4d42a7049e 100644 --- a/src/diffusers/pipelines/latte/pipeline_latte.py +++ b/src/diffusers/pipelines/latte/pipeline_latte.py @@ -679,7 +679,7 @@ class LattePipeline(DiffusionPipeline): latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx.py b/src/diffusers/pipelines/ltx/pipeline_ltx.py index 77ba751700..bd23e657c4 100644 --- a/src/diffusers/pipelines/ltx/pipeline_ltx.py +++ b/src/diffusers/pipelines/ltx/pipeline_ltx.py @@ -601,7 +601,7 @@ class LTXPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderMixi latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py b/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py index 217478f418..537588f67c 100644 --- a/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py +++ b/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py @@ -938,7 +938,7 @@ class LTXConditionPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraL latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py b/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py index 8793d81377..694378b4f0 100644 --- a/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py +++ b/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py @@ -665,7 +665,7 @@ class LTXImageToVideoPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLo latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/lumina/pipeline_lumina.py b/src/diffusers/pipelines/lumina/pipeline_lumina.py index 2067444fa0..b59c265646 100644 --- a/src/diffusers/pipelines/lumina/pipeline_lumina.py +++ b/src/diffusers/pipelines/lumina/pipeline_lumina.py @@ -697,7 +697,7 @@ class LuminaPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py index 0fa0fe9773..c4df7ba1c3 100644 --- a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py +++ b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py @@ -564,7 +564,7 @@ class Lumina2Pipeline(DiffusionPipeline, Lumina2LoraLoaderMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/mochi/pipeline_mochi.py b/src/diffusers/pipelines/mochi/pipeline_mochi.py index 3c0f908296..5581529b23 100644 --- a/src/diffusers/pipelines/mochi/pipeline_mochi.py +++ b/src/diffusers/pipelines/mochi/pipeline_mochi.py @@ -534,7 +534,7 @@ class MochiPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/omnigen/pipeline_omnigen.py b/src/diffusers/pipelines/omnigen/pipeline_omnigen.py index 1254b6725f..f5a535b2da 100644 --- a/src/diffusers/pipelines/omnigen/pipeline_omnigen.py +++ b/src/diffusers/pipelines/omnigen/pipeline_omnigen.py @@ -366,7 +366,7 @@ class OmniGenPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py b/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py index 913a647fae..a6df1b22c8 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py @@ -1199,7 +1199,7 @@ class StableDiffusionXLControlNetPAGImg2ImgPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_kolors.py b/src/diffusers/pipelines/pag/pipeline_pag_kolors.py index ed8e33e2ba..1368358db6 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_kolors.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_kolors.py @@ -769,7 +769,7 @@ class KolorsPAGPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py b/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py index d9d6d14a38..9031877b5b 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py @@ -644,7 +644,7 @@ class PixArtSigmaPAGPipeline(DiffusionPipeline, PAGMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. 
+ tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sana.py b/src/diffusers/pipelines/pag/pipeline_pag_sana.py index 8dbae13a3f..5857eeeb04 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sana.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sana.py @@ -703,7 +703,7 @@ class SanaPAGPipeline(DiffusionPipeline, PAGMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py index 96796f53b0..acb4e52340 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py @@ -761,7 +761,7 @@ class StableDiffusion3PAGPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSin latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py index 202120dc2c..e1819a79fb 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py @@ -822,7 +822,7 @@ class StableDiffusion3PAGImg2ImgPipeline(DiffusionPipeline, SD3LoraLoaderMixin, latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py index 4504684133..6b62ddcc7c 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py @@ -948,7 +948,7 @@ class StableDiffusionXLPAGPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py index 8c355a5fb1..b6422b2364 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py @@ -1111,7 +1111,7 @@ class StableDiffusionXLPAGImg2ImgPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py index 7d42d1876a..2e12a4a97f 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py @@ -1251,7 +1251,7 @@ class StableDiffusionXLPAGInpaintPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py index bd69746be3..1d718a4852 100644 --- a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +++ b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py @@ -755,7 +755,7 @@ class PixArtAlphaPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py index c14036cf94..bb169ac5c4 100644 --- a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +++ b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py @@ -700,7 +700,7 @@ class PixArtSigmaPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py index c2766baf8b..2340896133 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py @@ -667,7 +667,7 @@ class QwenImageInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): diff --git a/src/diffusers/pipelines/sana/pipeline_sana.py b/src/diffusers/pipelines/sana/pipeline_sana.py index 103f57a236..c54fec5b3a 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana.py +++ b/src/diffusers/pipelines/sana/pipeline_sana.py @@ -781,7 +781,7 @@ class SanaPipeline(DiffusionPipeline, SanaLoraLoaderMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py b/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py index cdc602b964..17d6dfd83e 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py @@ -844,7 +844,7 @@ class SanaControlNetPipeline(DiffusionPipeline, SanaLoraLoaderMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/sana/pipeline_sana_sprint.py b/src/diffusers/pipelines/sana/pipeline_sana_sprint.py index e8f9d8368f..a140cc1672 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_sprint.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_sprint.py @@ -663,7 +663,7 @@ class SanaSprintPipeline(DiffusionPipeline, SanaLoraLoaderMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py b/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py index bf290c3ced..34d3b9d17e 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py @@ -736,7 +736,7 @@ class SanaSprintImg2ImgPipeline(DiffusionPipeline, SanaLoraLoaderMixin): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py index 6130a9873c..aa39983c4e 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py @@ -362,7 +362,7 @@ class StableCascadeDecoderPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py index b705c7e6e5..b3dc23f2e5 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py @@ -237,7 +237,7 @@ class StableCascadeCombinedPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py index b3b46af206..9e63b3489c 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py @@ -442,7 +442,7 @@ class StableCascadePriorPipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py index 06c2076816..6ebe0986a1 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py @@ -313,7 +313,7 @@ class OnnxStableDiffusionPipeline(DiffusionPipeline): latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py index 141d849ec3..158bcabbeb 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py @@ -378,7 +378,7 @@ class OnnxStableDiffusionInpaintPipeline(DiffusionPipeline): latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py index 882fa98b07..a765163175 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py @@ -398,7 +398,7 @@ class OnnxStableDiffusionUpscalePipeline(DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py index afee3f61e9..1618f89a49 100644 --- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py @@ -854,7 +854,7 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py index fa1e0a4f32..7e97909f42 100644 --- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py @@ -909,7 +909,7 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline, SD3LoraLoaderMixin, Fro latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py index 937f7195b2..bed596e57c 100644 --- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py @@ -984,7 +984,7 @@ class StableDiffusion3InpaintPipeline(DiffusionPipeline, SD3LoraLoaderMixin, Fro 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): @@ -1033,7 +1033,7 @@ class StableDiffusion3InpaintPipeline(DiffusionPipeline, SD3LoraLoaderMixin, Fro latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py index 350a492826..df2564a89b 100755 --- a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py @@ -539,7 +539,7 @@ class StableDiffusionKDiffusionPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py index 3b57555071..766ca37d81 100644 --- a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py @@ -652,7 +652,7 @@ class StableDiffusionXLKDiffusionPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py index 9ac64a0d84..b97cf6f1f6 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py @@ -937,7 +937,7 @@ class StableDiffusionXLPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
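The SDXL docstrings touched above also describe the other route: passing a pre-generated `latents` tensor directly so the exact same starting noise is reused across prompts. A minimal sketch, assuming the public SDXL base checkpoint, fp16 on CUDA, and the default 1024x1024 output (for which the latent shape is `(batch, 4, height // 8, width // 8)`):

```python
# Sketch: hand the pipeline a fixed noise tensor via `latents` and vary only the prompt.
# Shape/dtype must match what the pipeline would otherwise sample itself.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

latents = torch.randn(
    (1, 4, 128, 128), generator=torch.Generator().manual_seed(0), dtype=torch.float16
)

img_a = pipe("a watercolor fox", latents=latents.clone()).images[0]
img_b = pipe("an oil-painting fox", latents=latents.clone()).images[0]
```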
diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py index e63c7a55ce..44e8f4fe4b 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py @@ -1097,7 +1097,7 @@ class StableDiffusionXLImg2ImgPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py index f0bc9b9bb3..18f8536a75 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py @@ -1251,7 +1251,7 @@ class StableDiffusionXLInpaintPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py index b1379d1b29..58b0083617 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py @@ -695,7 +695,7 @@ class StableDiffusionXLInstructPix2PixPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py index 5c561721fc..1ce6987114 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py @@ -760,7 +760,7 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline, StableDiffusionMixin, Fr latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index 13183df47d..2802d690f3 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -971,7 +971,7 @@ class StableDiffusionXLAdapterPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py b/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py index a9fa43c1f5..288aae6c0d 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +++ b/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py @@ -1051,7 +1051,7 @@ class TextToVideoZeroSDXLPipeline( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. motion_field_strength_x (`float`, *optional*, defaults to 12): Strength of motion in generated video along x-axis. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1. 
diff --git a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py index 68130baad7..4e5b32c10c 100644 --- a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py +++ b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py @@ -319,7 +319,7 @@ class VisualClozePipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py index e7a1d4a4b2..8571211cd0 100644 --- a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py +++ b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py @@ -736,7 +736,7 @@ class VisualClozeGenerationPipeline( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py index b9b02a6dd3..bbdb60471f 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +++ b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py @@ -263,7 +263,7 @@ class WuerstchenDecoderPipeline(DeprecatedPipelineMixin, DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). 
diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py index 00a88ce34e..c54c1fefe8 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +++ b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py @@ -222,7 +222,7 @@ class WuerstchenCombinedPipeline(DeprecatedPipelineMixin, DiffusionPipeline): latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py index a32f09204d..e138b6e805 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +++ b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py @@ -348,7 +348,7 @@ class WuerstchenPriorPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin) latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). 
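The hunks above all touch the same `latents` docstring, which describes passing a pre-generated noise tensor so that several runs start from identical noise. A minimal sketch of that usage, not part of the patch (the checkpoint id, tensor shape, and prompts are illustrative assumptions):

```python
# Illustrative sketch only: reuse one pre-generated `latents` tensor so that
# different prompts start from the same initial noise. If `latents` is omitted,
# the tensor is instead sampled with the supplied random `generator`.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# For SD 1.5 at the default 512x512: (batch, latent_channels, height // 8, width // 8).
latents = torch.randn(1, 4, 64, 64, generator=torch.manual_seed(0), dtype=torch.float16).to("cuda")

for prompt in ["a watercolor fox", "a watercolor owl"]:
    image = pipe(prompt, latents=latents).images[0]
```
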
From 0fd7ee79ea54304a9e04921e5c8c841e1765de73 Mon Sep 17 00:00:00 2001 From: Leo Jiang Date: Tue, 26 Aug 2025 01:23:55 -0600 Subject: [PATCH 21/74] NPU attention refactor for FLUX (#12209) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * NPU attention refactor for FLUX transformer * Apply style fixes --------- Co-authored-by: J石页 Co-authored-by: Aryan Co-authored-by: github-actions[bot] --- examples/dreambooth/train_dreambooth_flux.py | 8 ++++++++ .../dreambooth/train_dreambooth_lora_flux.py | 9 +++++++++ .../train_dreambooth_lora_flux_kontext.py | 8 ++++++++ .../models/transformers/transformer_flux.py | 17 ++--------------- 4 files changed, 27 insertions(+), 15 deletions(-) diff --git a/examples/dreambooth/train_dreambooth_flux.py b/examples/dreambooth/train_dreambooth_flux.py index b803babdc8..c24d16c600 100644 --- a/examples/dreambooth/train_dreambooth_flux.py +++ b/examples/dreambooth/train_dreambooth_flux.py @@ -642,6 +642,7 @@ def parse_args(input_args=None): ], help="The image interpolation method to use for resizing images.", ) + parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enabla Flash Attention for NPU") if input_args is not None: args = parser.parse_args(input_args) @@ -1182,6 +1183,13 @@ def main(args): text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) + if args.enable_npu_flash_attention: + if is_torch_npu_available(): + logger.info("npu flash attention enabled.") + transformer.set_attention_backend("_native_npu") + else: + raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ") + # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 diff --git a/examples/dreambooth/train_dreambooth_lora_flux.py b/examples/dreambooth/train_dreambooth_lora_flux.py index a8a76097f3..2353625c38 100644 --- a/examples/dreambooth/train_dreambooth_lora_flux.py +++ b/examples/dreambooth/train_dreambooth_lora_flux.py @@ -80,6 +80,7 @@ from diffusers.utils import ( is_wandb_available, ) from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card +from diffusers.utils.import_utils import is_torch_npu_available from diffusers.utils.torch_utils import is_compiled_module @@ -686,6 +687,7 @@ def parse_args(input_args=None): ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enabla Flash Attention for NPU") if input_args is not None: args = parser.parse_args(input_args) @@ -1213,6 +1215,13 @@ def main(args): text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) + if args.enable_npu_flash_attention: + if is_torch_npu_available(): + logger.info("npu flash attention enabled.") + transformer.set_attention_backend("_native_npu") + else: + raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ") + # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
weight_dtype = torch.float32 diff --git a/examples/dreambooth/train_dreambooth_lora_flux_kontext.py b/examples/dreambooth/train_dreambooth_lora_flux_kontext.py index 6aa165ed20..ffeef7b4b3 100644 --- a/examples/dreambooth/train_dreambooth_lora_flux_kontext.py +++ b/examples/dreambooth/train_dreambooth_lora_flux_kontext.py @@ -706,6 +706,7 @@ def parse_args(input_args=None): ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enabla Flash Attention for NPU") if input_args is not None: args = parser.parse_args(input_args) @@ -1354,6 +1355,13 @@ def main(args): text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) + if args.enable_npu_flash_attention: + if is_torch_npu_available(): + logger.info("npu flash attention enabled.") + transformer.set_attention_backend("_native_npu") + else: + raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ") + # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 diff --git a/src/diffusers/models/transformers/transformer_flux.py b/src/diffusers/models/transformers/transformer_flux.py index 60c7eb1dba..7ab371a1a1 100644 --- a/src/diffusers/models/transformers/transformer_flux.py +++ b/src/diffusers/models/transformers/transformer_flux.py @@ -22,8 +22,7 @@ import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin -from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers -from ...utils.import_utils import is_torch_npu_available +from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import maybe_allow_in_graph from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward from ..attention_dispatch import dispatch_attention_fn @@ -354,25 +353,13 @@ class FluxSingleTransformerBlock(nn.Module): self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) - if is_torch_npu_available(): - from ..attention_processor import FluxAttnProcessor2_0_NPU - - deprecation_message = ( - "Defaulting to FluxAttnProcessor2_0_NPU for NPU devices will be removed. Attention processors " - "should be set explicitly using the `set_attn_processor` method." 
- ) - deprecate("npu_processor", "0.34.0", deprecation_message) - processor = FluxAttnProcessor2_0_NPU() - else: - processor = FluxAttnProcessor() - self.attn = FluxAttention( query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=True, - processor=processor, + processor=FluxAttnProcessor(), eps=1e-6, pre_only=True, ) From 5fcd5f560fd4681e71698980ac80179abc40987b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tolga=20Cang=C3=B6z?= <46008593+tolgacangoz@users.noreply.github.com> Date: Tue, 26 Aug 2025 10:24:19 +0300 Subject: [PATCH 22/74] Propose to update & upgrade SkyReels-V2 (#12167) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: update SkyReels-V2 documentation and moving into attn dispatcher * Refactors SkyReelsV2's attention implementation * style * up * Fixes formatting in SkyReels-V2 documentation Wraps the visual demonstration section in a Markdown code block. This change corrects the rendering of ASCII diagrams and examples, improving the overall readability of the document. * Docs: Condense example arrays in skyreels_v2 guide Improves the readability of the `step_matrix` examples by replacing long sequences of repeated numbers with a more compact `value×count` notation. This change makes the underlying data patterns in the examples easier to understand at a glance. * Add _repeated_blocks attribute to SkyReelsV2Transformer3DModel * Refactor rotary embedding calculations in SkyReelsV2 to separate cosine and sine frequencies * Enhance SkyReels-V2 documentation: update model loading for GPU support and remove outdated notes * up * up * Update model_id in SkyReels-V2 documentation * up * refactor: remove device_map parameter for model loading and add pipeline.to("cuda") for GPU allocation * fix: update copyright year to 2025 in skyreels_v2.md * docs: enhance parameter examples and formatting in skyreels_v2.md * docs: update example formatting and add notes on LoRA support in skyreels_v2.md * refactor: remove copied comments from transformer_wan in SkyReelsV2 classes * Clean up comments in skyreels_v2.md Removed comments about acceleration helpers and Flash Attention installation. * Add deprecation warning for `SkyReelsV2AttnProcessor2_0` class --- docs/source/en/api/pipelines/skyreels_v2.md | 247 ++++++------- .../transformers/transformer_skyreels_v2.py | 330 +++++++++++++----- 2 files changed, 365 insertions(+), 212 deletions(-) diff --git a/docs/source/en/api/pipelines/skyreels_v2.md b/docs/source/en/api/pipelines/skyreels_v2.md index cd94f2a75c..6730f15516 100644 --- a/docs/source/en/api/pipelines/skyreels_v2.md +++ b/docs/source/en/api/pipelines/skyreels_v2.md @@ -1,4 +1,4 @@ - - -# JAX/Flax - -[[open-in-colab]] - -🤗 Diffusers supports Flax for super fast inference on Google TPUs, such as those available in Colab, Kaggle or Google Cloud Platform. This guide shows you how to run inference with Stable Diffusion using JAX/Flax. - -Before you begin, make sure you have the necessary libraries installed: - -```py -# uncomment to install the necessary libraries in Colab -#!pip install -q jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy -#!pip install -q diffusers -``` - -You should also make sure you're using a TPU backend. While JAX does not run exclusively on TPUs, you'll get the best performance on a TPU because each server has 8 TPU accelerators working in parallel. 
- -If you are running this guide in Colab, select *Runtime* in the menu above, select the option *Change runtime type*, and then select *TPU* under the *Hardware accelerator* setting. Import JAX and quickly check whether you're using a TPU: - -```python -import jax -import jax.tools.colab_tpu -jax.tools.colab_tpu.setup_tpu() - -num_devices = jax.device_count() -device_type = jax.devices()[0].device_kind - -print(f"Found {num_devices} JAX devices of type {device_type}.") -assert ( - "TPU" in device_type, - "Available device is not a TPU, please select TPU from Runtime > Change runtime type > Hardware accelerator" -) -# Found 8 JAX devices of type Cloud TPU. -``` - -Great, now you can import the rest of the dependencies you'll need: - -```python -import jax.numpy as jnp -from jax import pmap -from flax.jax_utils import replicate -from flax.training.common_utils import shard - -from diffusers import FlaxStableDiffusionPipeline -``` - -## Load a model - -Flax is a functional framework, so models are stateless and parameters are stored outside of them. Loading a pretrained Flax pipeline returns *both* the pipeline and the model weights (or parameters). In this guide, you'll use `bfloat16`, a more efficient half-float type that is supported by TPUs (you can also use `float32` for full precision if you want). - -```python -dtype = jnp.bfloat16 -pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", - variant="bf16", - dtype=dtype, -) -``` - -## Inference - -TPUs usually have 8 devices working in parallel, so let's use the same prompt for each device. This means you can perform inference on 8 devices at once, with each device generating one image. As a result, you'll get 8 images in the same amount of time it takes for one chip to generate a single image! - - - -Learn more details in the [How does parallelization work?](#how-does-parallelization-work) section. - - - -After replicating the prompt, get the tokenized text ids by calling the `prepare_inputs` function on the pipeline. The length of the tokenized text is set to 77 tokens as required by the configuration of the underlying CLIP text model. - -```python -prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic" -prompt = [prompt] * jax.device_count() -prompt_ids = pipeline.prepare_inputs(prompt) -prompt_ids.shape -# (8, 77) -``` - -Model parameters and inputs have to be replicated across the 8 parallel devices. The parameters dictionary is replicated with [`flax.jax_utils.replicate`](https://flax.readthedocs.io/en/latest/api_reference/flax.jax_utils.html#flax.jax_utils.replicate) which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`. - -```python -# parameters -p_params = replicate(params) - -# arrays -prompt_ids = shard(prompt_ids) -prompt_ids.shape -# (8, 1, 77) -``` - -This shape means each one of the 8 devices receives as an input a `jnp` array with shape `(1, 77)`, where `1` is the batch size per device. On TPUs with sufficient memory, you could have a batch size larger than `1` if you want to generate multiple images (per chip) at once. - -Next, create a random number generator to pass to the generation function. This is standard procedure in Flax, which is very serious and opinionated about random numbers. 
All functions that deal with random numbers are expected to receive a generator to ensure reproducibility, even when you're training across multiple distributed devices. - -The helper function below uses a seed to initialize a random number generator. As long as you use the same seed, you'll get the exact same results. Feel free to use different seeds when exploring results later in the guide. - -```python -def create_key(seed=0): - return jax.random.PRNGKey(seed) -``` - -The helper function, or `rng`, is split 8 times so each device receives a different generator and generates a different image. - -```python -rng = create_key(0) -rng = jax.random.split(rng, jax.device_count()) -``` - -To take advantage of JAX's optimized speed on a TPU, pass `jit=True` to the pipeline to compile the JAX code into an efficient representation and to ensure the model runs in parallel across the 8 devices. - - - -You need to ensure all your inputs have the same shape in subsequent calls, otherwise JAX will need to recompile the code which is slower. - - - -The first inference run takes more time because it needs to compile the code, but subsequent calls (even with different inputs) are much faster. For example, it took more than a minute to compile on a TPU v2-8, but then it takes about **7s** on a future inference run! - -```py -%%time -images = pipeline(prompt_ids, p_params, rng, jit=True)[0] - -# CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s -# Wall time: 1min 29s -``` - -The returned array has shape `(8, 1, 512, 512, 3)` which should be reshaped to remove the second dimension and get 8 images of `512 × 512 × 3`. Then you can use the [`~utils.numpy_to_pil`] function to convert the arrays into images. - -```python -from diffusers.utils import make_image_grid - -images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) -images = pipeline.numpy_to_pil(images) -make_image_grid(images, rows=2, cols=4) -``` - -![img](https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/stable_diffusion_jax_how_to_cell_38_output_0.jpeg) - -## Using different prompts - -You don't necessarily have to use the same prompt on all devices. For example, to generate 8 different prompts: - -```python -prompts = [ - "Labrador in the style of Hokusai", - "Painting of a squirrel skating in New York", - "HAL-9000 in the style of Van Gogh", - "Times Square under water, with fish and a dolphin swimming around", - "Ancient Roman fresco showing a man working on his laptop", - "Close-up photograph of young black woman against urban background, high quality, bokeh", - "Armchair in the shape of an avocado", - "Clown astronaut in space, with Earth in the background", -] - -prompt_ids = pipeline.prepare_inputs(prompts) -prompt_ids = shard(prompt_ids) - -images = pipeline(prompt_ids, p_params, rng, jit=True).images -images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) -images = pipeline.numpy_to_pil(images) - -make_image_grid(images, 2, 4) -``` - -![img](https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/stable_diffusion_jax_how_to_cell_43_output_0.jpeg) - -## How does parallelization work? - -The Flax pipeline in 🤗 Diffusers automatically compiles the model and runs it in parallel on all available devices. Let's take a closer look at how that process works. - -JAX parallelization can be done in multiple ways. 
The easiest one revolves around using the [`jax.pmap`](https://jax.readthedocs.io/en/latest/_autosummary/jax.pmap.html) function to achieve single-program multiple-data (SPMD) parallelization. It means running several copies of the same code, each on different data inputs. More sophisticated approaches are possible, and you can go over to the JAX [documentation](https://jax.readthedocs.io/en/latest/index.html) to explore this topic in more detail if you are interested! - -`jax.pmap` does two things: - -1. Compiles (or "`jit`s") the code which is similar to `jax.jit()`. This does not happen when you call `pmap`, and only the first time the `pmap`ped function is called. -2. Ensures the compiled code runs in parallel on all available devices. - -To demonstrate, call `pmap` on the pipeline's `_generate` method (this is a private method that generates images and may be renamed or removed in future releases of 🤗 Diffusers): - -```python -p_generate = pmap(pipeline._generate) -``` - -After calling `pmap`, the prepared function `p_generate` will: - -1. Make a copy of the underlying function, `pipeline._generate`, on each device. -2. Send each device a different portion of the input arguments (this is why it's necessary to call the *shard* function). In this case, `prompt_ids` has shape `(8, 1, 77, 768)` so the array is split into 8 and each copy of `_generate` receives an input with shape `(1, 77, 768)`. - -The most important thing to pay attention to here is the batch size (1 in this example), and the input dimensions that make sense for your code. You don't have to change anything else to make the code work in parallel. - -The first time you call the pipeline takes more time, but the calls afterward are much faster. The `block_until_ready` function is used to correctly measure inference time because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use that in your code; blocking occurs automatically when you want to use the result of a computation that has not yet been materialized. 
- -```py -%%time -images = p_generate(prompt_ids, p_params, rng) -images = images.block_until_ready() - -# CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s -# Wall time: 1min 15s -``` - -Check your image dimensions to see if they're correct: - -```python -images.shape -# (8, 1, 512, 512, 3) -``` - -## Resources - -To learn more about how JAX works with Stable Diffusion, you may be interested in reading: - -* [Accelerating Stable Diffusion XL Inference with JAX on Cloud TPU v5e](https://hf.co/blog/sdxl_jax) From cbecc33570cf219ca8460f465bb427725ece01a0 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Wed, 27 Aug 2025 11:35:31 -0700 Subject: [PATCH 29/74] [docs] Reproducibility (#12237) * init * dupe * feedback --- docs/source/en/_toctree.yml | 4 +- .../en/using-diffusers/reusing_seeds.md | 153 +++++++----------- 2 files changed, 56 insertions(+), 101 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index bf7f9c1354..a0ddf8f256 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -21,7 +21,7 @@ - local: using-diffusers/callback title: Pipeline callbacks - local: using-diffusers/reusing_seeds - title: Reproducible pipelines + title: Reproducibility - local: using-diffusers/schedulers title: Load schedulers and models - local: using-diffusers/scheduler_features @@ -62,8 +62,6 @@ title: Scheduler features - local: using-diffusers/callback title: Pipeline callbacks - - local: using-diffusers/reusing_seeds - title: Reproducible pipelines - local: using-diffusers/image_quality title: Controlling image quality diff --git a/docs/source/en/using-diffusers/reusing_seeds.md b/docs/source/en/using-diffusers/reusing_seeds.md index ac9350f24c..b4aed0aa63 100644 --- a/docs/source/en/using-diffusers/reusing_seeds.md +++ b/docs/source/en/using-diffusers/reusing_seeds.md @@ -10,129 +10,86 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Reproducible pipelines +# Reproducibility -Diffusion models are inherently random which is what allows it to generate different outputs every time it is run. But there are certain times when you want to generate the same output every time, like when you're testing, replicating results, and even [improving image quality](#deterministic-batch-generation). While you can't expect to get identical results across platforms, you can expect reproducible results across releases and platforms within a certain tolerance range (though even this may vary). +Diffusion is a random process that generates a different output every time. For certain situations like testing and replicating results, you want to generate the same result each time, across releases and platforms within a certain tolerance range. -This guide will show you how to control randomness for deterministic generation on a CPU and GPU. +This guide will show you how to control sources of randomness and enable deterministic algorithms. + +## Generator + +Pipelines rely on [torch.randn](https://pytorch.org/docs/stable/generated/torch.randn.html), which uses a different random seed each time, to create the initial noisy tensors. To generate the same output on a CPU or GPU, use a [Generator](https://docs.pytorch.org/docs/stable/generated/torch.Generator.html) to manage how random values are generated. 
> [!TIP] -> We strongly recommend reading PyTorch's [statement about reproducibility](https://pytorch.org/docs/stable/notes/randomness.html): -> -> "Completely reproducible results are not guaranteed across PyTorch releases, individual commits, or different platforms. Furthermore, results may not be reproducible between CPU and GPU executions, even when using identical seeds." +> If reproducibility is important to your use case, we recommend always using a CPU `Generator`. The performance loss is often negligible and you'll generate more similar values. -## Control randomness + + -During inference, pipelines rely heavily on random sampling operations which include creating the -Gaussian noise tensors to denoise and adding noise to the scheduling step. +The GPU uses a different random number generator than the CPU. Diffusers solves this issue with the [`~utils.torch_utils.randn_tensor`] function to create the random tensor on a CPU and then moving it to the GPU. This function is used everywhere inside the pipeline and you don't need to explicitly call it. -Take a look at the tensor values in the [`DDIMPipeline`] after two inference steps. +Use [manual_seed](https://docs.pytorch.org/docs/stable/generated/torch.manual_seed.html) as shown below to set a seed. -```python -from diffusers import DDIMPipeline -import numpy as np - -ddim = DDIMPipeline.from_pretrained( "google/ddpm-cifar10-32", use_safetensors=True) -image = ddim(num_inference_steps=2, output_type="np").images -print(np.abs(image).sum()) -``` - -Running the code above prints one value, but if you run it again you get a different value. - -Each time the pipeline is run, [torch.randn](https://pytorch.org/docs/stable/generated/torch.randn.html) uses a different random seed to create the Gaussian noise tensors. This leads to a different result each time it is run and enables the diffusion pipeline to generate a different random image each time. - -But if you need to reliably generate the same image, that depends on whether you're running the pipeline on a CPU or GPU. - -> [!TIP] -> It might seem unintuitive to pass `Generator` objects to a pipeline instead of the integer value representing the seed. However, this is the recommended design when working with probabilistic models in PyTorch because a `Generator` is a *random state* that can be passed to multiple pipelines in a sequence. As soon as the `Generator` is consumed, the *state* is changed in place which means even if you passed the same `Generator` to a different pipeline, it won't produce the same result because the state is already changed. - - - - -To generate reproducible results on a CPU, you'll need to use a PyTorch [Generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed. Now when you run the code, it always prints a value of `1491.1711` because the `Generator` object with the seed is passed to all the random functions in the pipeline. You should get a similar, if not the same, result on whatever hardware and PyTorch version you're using. 
- -```python +```py import torch import numpy as np from diffusers import DDIMPipeline -ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True) +ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", device_map="cuda") +generator = torch.manual_seed(0) +image = ddim(num_inference_steps=2, output_type="np", generator=generator).images +print(np.abs(image).sum()) +``` + + + + +Set `device="cpu"` in the `Generator` and use [manual_seed](https://docs.pytorch.org/docs/stable/generated/torch.manual_seed.html) to set a seed for generating random numbers. + +```py +import torch +import numpy as np +from diffusers import DDIMPipeline + +ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32") generator = torch.Generator(device="cpu").manual_seed(0) image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` - - - -Writing a reproducible pipeline on a GPU is a bit trickier, and full reproducibility across different hardware is not guaranteed because matrix multiplication - which diffusion pipelines require a lot of - is less deterministic on a GPU than a CPU. For example, if you run the same code example from the CPU example, you'll get a different result even though the seed is identical. This is because the GPU uses a different random number generator than the CPU. - -```python -import torch -import numpy as np -from diffusers import DDIMPipeline - -ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True) -ddim.to("cuda") -generator = torch.Generator(device="cuda").manual_seed(0) -image = ddim(num_inference_steps=2, output_type="np", generator=generator).images -print(np.abs(image).sum()) -``` - -To avoid this issue, Diffusers has a [`~utils.torch_utils.randn_tensor`] function for creating random noise on the CPU, and then moving the tensor to a GPU if necessary. The [`~utils.torch_utils.randn_tensor`] function is used everywhere inside the pipeline. Now you can call [torch.manual_seed](https://pytorch.org/docs/stable/generated/torch.manual_seed.html) which automatically creates a CPU `Generator` that can be passed to the pipeline even if it is being run on a GPU. - -```python -import torch -import numpy as np -from diffusers import DDIMPipeline - -ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True) -ddim.to("cuda") -generator = torch.manual_seed(0) -image = ddim(num_inference_steps=2, output_type="np", generator=generator).images -print(np.abs(image).sum()) -``` - -> [!TIP] -> If reproducibility is important to your use case, we recommend always passing a CPU `Generator`. The performance loss is often negligible and you'll generate more similar values than if the pipeline had been run on a GPU. - -Finally, more complex pipelines such as [`UnCLIPPipeline`], are often extremely -susceptible to precision error propagation. You'll need to use -exactly the same hardware and PyTorch version for full reproducibility. - +The `Generator` object should be passed to the pipeline instead of an integer seed. `Generator` maintains a *random state* that is consumed and modified when used. Once consumed, the same `Generator` object produces different results in subsequent calls, even across different pipelines, because it's *state* has changed. 
+ +```py +generator = torch.manual_seed(0) + +for _ in range(5): +- image = pipeline(prompt, generator=generator) ++ image = pipeline(prompt, generator=torch.manual_seed(0)) +``` + ## Deterministic algorithms -You can also configure PyTorch to use deterministic algorithms to create a reproducible pipeline. The downside is that deterministic algorithms may be slower than non-deterministic ones and you may observe a decrease in performance. +PyTorch supports [deterministic algorithms](https://docs.pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms) - where available - for certain operations so they produce the same results. Deterministic algorithms may be slower and decrease performance. -Non-deterministic behavior occurs when operations are launched in more than one CUDA stream. To avoid this, set the environment variable [CUBLAS_WORKSPACE_CONFIG](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility) to `:16:8` to only use one buffer size during runtime. - -PyTorch typically benchmarks multiple algorithms to select the fastest one, but if you want reproducibility, you should disable this feature because the benchmark may select different algorithms each time. Set Diffusers [enable_full_determinism](https://github.com/huggingface/diffusers/blob/142f353e1c638ff1d20bd798402b68f72c1ebbdd/src/diffusers/utils/testing_utils.py#L861) to enable deterministic algorithms. - -```py -enable_full_determinism() -``` - -Now when you run the same pipeline twice, you'll get identical results. +Use Diffusers' [enable_full_determinism](https://github.com/huggingface/diffusers/blob/142f353e1c638ff1d20bd798402b68f72c1ebbdd/src/diffusers/utils/testing_utils.py#L861) function to enable deterministic algorithms. ```py import torch -from diffusers import DDIMScheduler, StableDiffusionPipeline +from diffusers_utils import enable_full_determinism -pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True).to("cuda") -pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) -g = torch.Generator(device="cuda") - -prompt = "A bear is playing a guitar on Times Square" - -g.manual_seed(0) -result1 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images - -g.manual_seed(0) -result2 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images - -print("L_inf dist =", abs(result1 - result2).max()) -"L_inf dist = tensor(0., device='cuda:0')" +enable_full_determinism() ``` + +Under the hood, `enable_full_determinism` works by: + +- Setting the environment variable [CUBLAS_WORKSPACE_CONFIG](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility) to `:16:8` to only use one buffer size during rntime. Non-deterministic behavior occurs when operations are used in more than one CUDA stream. +- Disabling benchmarking to find the fastest convolution operation by setting `torch.backends.cudnn.benchmark=False`. Non-deterministic behavior occurs because the benchmark may select different algorithms each time depending on hardware or benchmarking noise. +- Disabling TensorFloat32 (TF32) operations in favor of more precise and consistent full-precision operations. + + +## Resources + +We strongly recommend reading PyTorch's developer notes about [Reproducibility](https://docs.pytorch.org/docs/stable/notes/randomness.html). You can try to limit randomness, but it is not *guaranteed* even with an identical seed. 
\ No newline at end of file From e58711e73cba70c1c02aaa67f80945a1458901b8 Mon Sep 17 00:00:00 2001 From: YiYi Xu Date: Wed, 27 Aug 2025 22:18:07 -1000 Subject: [PATCH 30/74] [Modular] support standard repo (#11944) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * make modular pipeline work with model_index.json * up * style * up * up * style * up more * Fix MultiControlNet import (#12118) fix --------- Co-authored-by: Álvaro Somoza Co-authored-by: Dhruv Nair --- .../modular_pipelines/modular_pipeline.py | 150 +++++++++++++++--- .../stable_diffusion_xl/before_denoise.py | 2 +- src/diffusers/pipelines/pipeline_utils.py | 30 ++++ 3 files changed, 157 insertions(+), 25 deletions(-) diff --git a/src/diffusers/modular_pipelines/modular_pipeline.py b/src/diffusers/modular_pipelines/modular_pipeline.py index 8a05cce209..c53fa81d56 100644 --- a/src/diffusers/modular_pipelines/modular_pipeline.py +++ b/src/diffusers/modular_pipelines/modular_pipeline.py @@ -128,6 +128,15 @@ class PipelineState: """ return {**self.__dict__} + def __getattr__(self, name): + """ + Allow attribute access to intermediate values. If an attribute is not found in the object, look for it in the + intermediates dict. + """ + if name in self.intermediates: + return self.intermediates[name] + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + def __repr__(self): def format_value(v): if hasattr(v, "shape") and hasattr(v, "dtype"): @@ -638,7 +647,7 @@ class AutoPipelineBlocks(ModularPipelineBlocks): break if block is None: - logger.warning(f"skipping auto block: {self.__class__.__name__}") + logger.info(f"skipping auto block: {self.__class__.__name__}") return pipeline, state try: @@ -1450,9 +1459,10 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): Args: blocks: `ModularPipelineBlocks` instance. If None, will attempt to load default blocks based on the pipeline class name. - pretrained_model_name_or_path: Path to a pretrained pipeline configuration. If provided, - will load component specs (only for from_pretrained components) and config values from the saved - modular_model_index.json file. + pretrained_model_name_or_path: Path to a pretrained pipeline configuration. Can be None if the pipeline + does not require any additional loading config. If provided, will first try to load component specs + (only for from_pretrained components) and config values from `modular_model_index.json`, then + fallback to `model_index.json` for compatibility with standard non-modular repositories. components_manager: Optional ComponentsManager for managing multiple component cross different pipelines and apply offloading strategies. 
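The fallback described in the docstring above is what lets a modular pipeline be resolved from a standard, non-modular repository. A minimal sketch under that assumption (the repo id is illustrative and not taken from the patch):

```python
# Sketch only: the repo below ships a plain model_index.json (no modular_model_index.json),
# so component specs are read from it and the concrete modular pipeline class is
# resolved through MODULAR_PIPELINE_MAPPING.
from diffusers import ModularPipeline

pipe = ModularPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
print(type(pipe).__name__)  # expected to be the SDXL modular pipeline class
```
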
@@ -1501,18 +1511,70 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): # update component_specs and config_specs from modular_repo if pretrained_model_name_or_path is not None: - config_dict = self.load_config(pretrained_model_name_or_path, **kwargs) + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) - for name, value in config_dict.items(): - # all the components in modular_model_index.json are from_pretrained components - if name in self._component_specs and isinstance(value, (tuple, list)) and len(value) == 3: - library, class_name, component_spec_dict = value - component_spec = self._dict_to_component_spec(name, component_spec_dict) - component_spec.default_creation_method = "from_pretrained" - self._component_specs[name] = component_spec + load_config_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "proxies": proxies, + "token": token, + "local_files_only": local_files_only, + "revision": revision, + } + # try to load modular_model_index.json + try: + config_dict = self.load_config(pretrained_model_name_or_path, **load_config_kwargs) + except EnvironmentError as e: + logger.debug(f"modular_model_index.json not found: {e}") + config_dict = None - elif name in self._config_specs: - self._config_specs[name].default = value + # update component_specs and config_specs based on modular_model_index.json + if config_dict is not None: + for name, value in config_dict.items(): + # all the components in modular_model_index.json are from_pretrained components + if name in self._component_specs and isinstance(value, (tuple, list)) and len(value) == 3: + library, class_name, component_spec_dict = value + component_spec = self._dict_to_component_spec(name, component_spec_dict) + component_spec.default_creation_method = "from_pretrained" + self._component_specs[name] = component_spec + + elif name in self._config_specs: + self._config_specs[name].default = value + + # if modular_model_index.json is not found, try to load model_index.json + else: + logger.debug(" loading config from model_index.json") + try: + from diffusers import DiffusionPipeline + + config_dict = DiffusionPipeline.load_config(pretrained_model_name_or_path, **load_config_kwargs) + except EnvironmentError as e: + logger.debug(f" model_index.json not found in the repo: {e}") + config_dict = None + + # update component_specs and config_specs based on model_index.json + if config_dict is not None: + for name, value in config_dict.items(): + if name in self._component_specs and isinstance(value, (tuple, list)) and len(value) == 2: + library, class_name = value + component_spec_dict = { + "repo": pretrained_model_name_or_path, + "subfolder": name, + "type_hint": (library, class_name), + } + component_spec = self._dict_to_component_spec(name, component_spec_dict) + component_spec.default_creation_method = "from_pretrained" + self._component_specs[name] = component_spec + elif name in self._config_specs: + self._config_specs[name].default = value + + if len(kwargs) > 0: + logger.warning(f"Unexpected input '{kwargs.keys()}' provided. 
This input will be ignored.") register_components_dict = {} for name, component_spec in self._component_specs.items(): @@ -1570,8 +1632,10 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): Args: pretrained_model_name_or_path (`str` or `os.PathLike`, optional): - Path to a pretrained pipeline configuration. If provided, will load component specs (only for - from_pretrained components) and config values from the modular_model_index.json file. + Path to a pretrained pipeline configuration. It will first try to load config from + `modular_model_index.json`, then fallback to `model_index.json` for compatibility with standard + non-modular repositories. If the repo does not contain any pipeline config, it will be set to None + during initialization. trust_remote_code (`bool`, optional): Whether to trust remote code when loading the pipeline, need to be set to True if you want to create pipeline blocks based on the custom code in `pretrained_model_name_or_path` @@ -1607,11 +1671,35 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): } try: + # try to load modular_model_index.json config_dict = cls.load_config(pretrained_model_name_or_path, **load_config_kwargs) + except EnvironmentError as e: + logger.debug(f" modular_model_index.json not found in the repo: {e}") + config_dict = None + + if config_dict is not None: pipeline_class = _get_pipeline_class(cls, config=config_dict) - except EnvironmentError: - pipeline_class = cls - pretrained_model_name_or_path = None + else: + try: + logger.debug(" try to load model_index.json") + from diffusers import DiffusionPipeline + from diffusers.pipelines.auto_pipeline import _get_model + + config_dict = DiffusionPipeline.load_config(pretrained_model_name_or_path, **load_config_kwargs) + except EnvironmentError as e: + logger.debug(f" model_index.json not found in the repo: {e}") + + if config_dict is not None: + logger.debug(" try to determine the modular pipeline class from model_index.json") + standard_pipeline_class = _get_pipeline_class(cls, config=config_dict) + model_name = _get_model(standard_pipeline_class.__name__) + pipeline_class_name = MODULAR_PIPELINE_MAPPING.get(model_name, ModularPipeline.__name__) + diffusers_module = importlib.import_module("diffusers") + pipeline_class = getattr(diffusers_module, pipeline_class_name) + else: + # there is no config for modular pipeline, assuming that the pipeline block does not need any from_pretrained components + pipeline_class = cls + pretrained_model_name_or_path = None pipeline = pipeline_class( blocks=blocks, @@ -1949,17 +2037,31 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): for name, component in passed_components.items(): current_component_spec = self._component_specs[name] - # warn if type changed + # log if type changed if current_component_spec.type_hint is not None and not isinstance( component, current_component_spec.type_hint ): - logger.warning( + logger.info( f"ModularPipeline.update_components: adding {name} with new type: {component.__class__.__name__}, previous type: {current_component_spec.type_hint.__name__}" ) # update _component_specs based on the new component - new_component_spec = ComponentSpec.from_component(name, component) - if new_component_spec.default_creation_method != current_component_spec.default_creation_method: + if component is None: + new_component_spec = current_component_spec + if hasattr(self, name) and getattr(self, name) is not None: + logger.warning(f"ModularPipeline.update_components: setting {name} to None (spec unchanged)") + elif 
current_component_spec.default_creation_method == "from_pretrained" and not ( + hasattr(component, "_diffusers_load_id") and component._diffusers_load_id is not None + ): logger.warning( + f"ModularPipeline.update_components: {name} has no valid _diffusers_load_id. " + f"This will result in empty loading spec, use ComponentSpec.load() for proper specs" + ) + new_component_spec = ComponentSpec(name=name, type_hint=type(component)) + else: + new_component_spec = ComponentSpec.from_component(name, component) + + if new_component_spec.default_creation_method != current_component_spec.default_creation_method: + logger.info( f"ModularPipeline.update_components: changing the default_creation_method of {name} from {current_component_spec.default_creation_method} to {new_component_spec.default_creation_method}." ) @@ -1980,7 +2082,7 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): if current_component_spec.type_hint is not None and not isinstance( created_components[name], current_component_spec.type_hint ): - logger.warning( + logger.info( f"ModularPipeline.update_components: adding {name} with new type: {created_components[name].__class__.__name__}, previous type: {current_component_spec.type_hint.__name__}" ) # update _component_specs based on the user passed component_spec diff --git a/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py b/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py index fbe0d22a52..fefa622f1a 100644 --- a/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py +++ b/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py @@ -22,7 +22,7 @@ from ...configuration_utils import FrozenDict from ...guiders import ClassifierFreeGuidance from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, ControlNetModel, ControlNetUnionModel, UNet2DConditionModel -from ...pipelines.controlnet.multicontrolnet import MultiControlNetModel +from ...models.controlnets.multicontrolnet import MultiControlNetModel from ...schedulers import EulerDiscreteScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor, unwrap_module diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index d231989973..023feae4dd 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -1709,6 +1709,36 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin): logger.warning(f"cannot get type annotation for Parameter {k} of {cls}.") return signature_types + @property + def parameters(self) -> Dict[str, Any]: + r""" + The `self.parameters` property can be useful to run different pipelines with the same weights and + configurations without reallocating additional memory. + + Returns (`dict`): + A dictionary containing all the optional parameters needed to initialize the pipeline. + + Examples: + + ```py + >>> from diffusers import ( + ... StableDiffusionPipeline, + ... StableDiffusionImg2ImgPipeline, + ... StableDiffusionInpaintPipeline, + ... 
) + + >>> text2img = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") + >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components, **text2img.parameters) + >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components, **text2img.parameters) + ``` + """ + expected_modules, optional_parameters = self._get_signature_keys(self) + pipeline_parameters = { + k: self.config[k] for k in self.config.keys() if not k.startswith("_") and k in optional_parameters + } + + return pipeline_parameters + @property def components(self) -> Dict[str, Any]: r""" From 87b800e1546ecd1819a1eed4bfdf22e22f126588 Mon Sep 17 00:00:00 2001 From: Aryan Date: Thu, 28 Aug 2025 15:23:26 +0530 Subject: [PATCH 31/74] [modular diffusers] Fix AutoGuidance validation (#12247) fix --- src/diffusers/guiders/auto_guidance.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/diffusers/guiders/auto_guidance.py b/src/diffusers/guiders/auto_guidance.py index 8f4d7b11c9..5271a530ea 100644 --- a/src/diffusers/guiders/auto_guidance.py +++ b/src/diffusers/guiders/auto_guidance.py @@ -82,15 +82,15 @@ class AutoGuidance(BaseGuidance): self.guidance_rescale = guidance_rescale self.use_original_formulation = use_original_formulation - if auto_guidance_layers is None and auto_guidance_config is None: + is_layer_or_config_provided = auto_guidance_layers is not None or auto_guidance_config is not None + is_layer_and_config_provided = auto_guidance_layers is not None and auto_guidance_config is not None + if not is_layer_or_config_provided: raise ValueError( - "Either `auto_guidance_layers` or `auto_guidance_config` must be provided to enable Skip Layer Guidance." + "Either `auto_guidance_layers` or `auto_guidance_config` must be provided to enable AutoGuidance." ) - if auto_guidance_layers is not None and auto_guidance_config is not None: + if is_layer_and_config_provided: raise ValueError("Only one of `auto_guidance_layers` or `auto_guidance_config` can be provided.") - if (dropout is None and auto_guidance_layers is not None) or ( - dropout is not None and auto_guidance_layers is None - ): + if auto_guidance_config is None and dropout is None: raise ValueError("`dropout` must be provided if `auto_guidance_layers` is provided.") if auto_guidance_layers is not None: From 7aa6af1138b206bec10ab3af23a365c0f573b67d Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 28 Aug 2025 16:23:02 +0200 Subject: [PATCH 32/74] [Refactor] Move testing utils out of src (#12238) * update * update * update * update * update * merge main * Revert "merge main" This reverts commit 65efbcead58644b31596ed2d714f7cee0e0238d3. 
--- examples/conftest.py | 9 +- examples/controlnet/train_controlnet_sd3.py | 5 +- examples/vqgan/test_vqgan.py | 8 +- src/diffusers/utils/testing_utils.py | 102 +- src/diffusers/utils/torch_utils.py | 137 +- tests/conftest.py | 4 +- tests/hooks/__init__.py | 0 tests/hooks/test_group_offloading.py | 3 +- tests/hooks/test_hooks.py | 3 +- tests/lora/__init__.py | 0 tests/lora/test_lora_layers_auraflow.py | 5 +- tests/lora/test_lora_layers_cogvideox.py | 5 +- tests/lora/test_lora_layers_cogview4.py | 5 +- tests/lora/test_lora_layers_flux.py | 5 +- tests/lora/test_lora_layers_hunyuanvideo.py | 5 +- tests/lora/test_lora_layers_ltx_video.py | 5 +- tests/lora/test_lora_layers_lumina2.py | 5 +- tests/lora/test_lora_layers_mochi.py | 5 +- tests/lora/test_lora_layers_qwenimage.py | 5 +- tests/lora/test_lora_layers_sana.py | 5 +- tests/lora/test_lora_layers_sd.py | 5 +- tests/lora/test_lora_layers_sd3.py | 5 +- tests/lora/test_lora_layers_sdxl.py | 5 +- tests/lora/test_lora_layers_wan.py | 5 +- tests/lora/test_lora_layers_wanvace.py | 5 +- tests/lora/utils.py | 3 +- .../test_models_asymmetric_autoencoder_kl.py | 4 +- .../test_models_autoencoder_cosmos.py | 2 +- .../test_models_autoencoder_dc.py | 4 +- .../test_models_autoencoder_hunyuan_video.py | 4 +- .../test_models_autoencoder_kl.py | 4 +- .../test_models_autoencoder_kl_cogvideox.py | 4 +- ..._models_autoencoder_kl_temporal_decoder.py | 4 +- .../test_models_autoencoder_ltx_video.py | 4 +- .../test_models_autoencoder_magvit.py | 2 +- .../test_models_autoencoder_mochi.py | 4 +- .../test_models_autoencoder_oobleck.py | 4 +- .../test_models_autoencoder_tiny.py | 4 +- .../test_models_autoencoder_wan.py | 2 +- .../test_models_consistency_decoder_vae.py | 6 +- tests/models/autoencoders/test_models_vq.py | 4 +- tests/models/test_attention_processor.py | 3 +- tests/models/test_layers_utils.py | 3 +- tests/models/test_modeling_common.py | 8 +- .../test_models_dit_transformer2d.py | 4 +- .../test_models_pixart_transformer2d.py | 4 +- .../models/transformers/test_models_prior.py | 4 +- .../test_models_transformer_allegro.py | 4 +- .../test_models_transformer_aura_flow.py | 2 +- .../test_models_transformer_bria.py | 2 +- .../test_models_transformer_chroma.py | 2 +- .../test_models_transformer_cogvideox.py | 4 +- .../test_models_transformer_cogview3plus.py | 4 +- .../test_models_transformer_cogview4.py | 2 +- .../test_models_transformer_consisid.py | 4 +- .../test_models_transformer_cosmos.py | 2 +- .../test_models_transformer_easyanimate.py | 2 +- .../test_models_transformer_flux.py | 2 +- .../test_models_transformer_hidream.py | 4 +- .../test_models_transformer_hunyuan_dit.py | 4 +- .../test_models_transformer_hunyuan_video.py | 4 +- ...els_transformer_hunyuan_video_framepack.py | 4 +- .../test_models_transformer_latte.py | 4 +- .../test_models_transformer_ltx.py | 2 +- .../test_models_transformer_lumina.py | 4 +- .../test_models_transformer_lumina2.py | 4 +- .../test_models_transformer_mochi.py | 2 +- .../test_models_transformer_omnigen.py | 2 +- .../test_models_transformer_qwenimage.py | 2 +- .../test_models_transformer_sana.py | 4 +- .../test_models_transformer_sd3.py | 4 +- .../test_models_transformer_skyreels_v2.py | 4 +- .../test_models_transformer_temporal.py | 4 +- .../test_models_transformer_wan.py | 4 +- tests/models/unets/test_models_unet_1d.py | 4 +- tests/models/unets/test_models_unet_2d.py | 4 +- .../unets/test_models_unet_2d_condition.py | 4 +- .../unets/test_models_unet_3d_condition.py | 2 +- .../unets/test_models_unet_controlnetxs.py | 2 +- 
tests/models/unets/test_models_unet_motion.py | 4 +- .../unets/test_models_unet_spatiotemporal.py | 4 +- tests/models/unets/test_unet_2d_blocks.py | 2 +- tests/models/unets/test_unet_blocks_common.py | 5 +- ...st_modular_pipeline_stable_diffusion_xl.py | 10 +- .../test_modular_pipelines_common.py | 3 +- tests/others/__init__.py | 0 tests/others/test_config.py | 3 +- tests/others/test_ema.py | 3 +- tests/others/test_outputs.py | 3 +- tests/others/test_training.py | 3 +- tests/others/test_utils.py | 3 +- tests/pipelines/allegro/test_allegro.py | 4 +- .../pipelines/animatediff/test_animatediff.py | 4 +- .../test_animatediff_controlnet.py | 2 +- .../animatediff/test_animatediff_sdxl.py | 2 +- .../test_animatediff_sparsectrl.py | 2 +- .../test_animatediff_video2video.py | 2 +- ...test_animatediff_video2video_controlnet.py | 2 +- tests/pipelines/audioldm2/test_audioldm2.py | 4 +- tests/pipelines/bria/test_pipeline_bria.py | 9 +- .../pipelines/chroma/test_pipeline_chroma.py | 2 +- .../chroma/test_pipeline_chroma_img2img.py | 2 +- tests/pipelines/cogvideo/test_cogvideox.py | 4 +- .../cogvideo/test_cogvideox_fun_control.py | 4 +- .../cogvideo/test_cogvideox_image2video.py | 4 +- .../cogvideo/test_cogvideox_video2video.py | 2 +- tests/pipelines/cogview3/test_cogview3plus.py | 4 +- tests/pipelines/cogview4/test_cogview4.py | 2 +- tests/pipelines/consisid/test_consisid.py | 4 +- .../test_consistency_models.py | 6 +- tests/pipelines/controlnet/test_controlnet.py | 6 +- .../controlnet/test_controlnet_img2img.py | 6 +- .../controlnet/test_controlnet_inpaint.py | 6 +- .../test_controlnet_inpaint_sdxl.py | 4 +- .../controlnet/test_controlnet_sdxl.py | 6 +- .../test_controlnet_sdxl_img2img.py | 4 +- .../controlnet_flux/test_controlnet_flux.py | 6 +- .../test_controlnet_flux_img2img.py | 6 +- .../test_controlnet_flux_inpaint.py | 6 +- .../test_controlnet_hunyuandit.py | 6 +- .../test_controlnet_inpaint_sd3.py | 6 +- .../controlnet_sd3/test_controlnet_sd3.py | 6 +- tests/pipelines/cosmos/test_cosmos.py | 2 +- .../cosmos/test_cosmos2_text2image.py | 2 +- .../cosmos/test_cosmos2_video2world.py | 2 +- .../cosmos/test_cosmos_video2world.py | 2 +- tests/pipelines/ddim/test_ddim.py | 2 +- tests/pipelines/ddpm/test_ddpm.py | 3 +- tests/pipelines/deepfloyd_if/__init__.py | 2 +- tests/pipelines/deepfloyd_if/test_if.py | 4 +- .../pipelines/deepfloyd_if/test_if_img2img.py | 4 +- .../test_if_img2img_superresolution.py | 4 +- .../deepfloyd_if/test_if_inpainting.py | 4 +- .../test_if_inpainting_superresolution.py | 4 +- .../deepfloyd_if/test_if_superresolution.py | 4 +- tests/pipelines/dit/test_dit.py | 4 +- .../pipelines/easyanimate/test_easyanimate.py | 4 +- tests/pipelines/flux/test_pipeline_flux.py | 4 +- .../flux/test_pipeline_flux_control.py | 2 +- .../test_pipeline_flux_control_img2img.py | 2 +- .../test_pipeline_flux_control_inpaint.py | 4 +- .../pipelines/flux/test_pipeline_flux_fill.py | 4 +- .../flux/test_pipeline_flux_img2img.py | 4 +- .../flux/test_pipeline_flux_inpaint.py | 4 +- .../flux/test_pipeline_flux_kontext.py | 2 +- .../test_pipeline_flux_kontext_inpaint.py | 2 +- .../flux/test_pipeline_flux_redux.py | 3 +- .../hidream_image/test_pipeline_hidream.py | 2 +- .../hunyuan_video/test_hunyuan_image2video.py | 2 +- .../test_hunyuan_skyreels_image2video.py | 2 +- .../hunyuan_video/test_hunyuan_video.py | 2 +- .../test_hunyuan_video_framepack.py | 4 +- .../pipelines/hunyuandit/test_hunyuan_dit.py | 4 +- tests/pipelines/ip_adapters/__init__.py | 0 .../test_ip_adapter_stable_diffusion.py | 3 +- 
tests/pipelines/kandinsky/test_kandinsky.py | 4 +- .../kandinsky/test_kandinsky_combined.py | 2 +- .../kandinsky/test_kandinsky_img2img.py | 4 +- .../kandinsky/test_kandinsky_inpaint.py | 4 +- .../kandinsky/test_kandinsky_prior.py | 2 +- .../pipelines/kandinsky2_2/test_kandinsky.py | 4 +- .../kandinsky2_2/test_kandinsky_combined.py | 2 +- .../kandinsky2_2/test_kandinsky_controlnet.py | 4 +- .../test_kandinsky_controlnet_img2img.py | 4 +- .../kandinsky2_2/test_kandinsky_img2img.py | 4 +- .../kandinsky2_2/test_kandinsky_inpaint.py | 4 +- .../kandinsky2_2/test_kandinsky_prior.py | 2 +- .../test_kandinsky_prior_emb2emb.py | 4 +- tests/pipelines/kandinsky3/test_kandinsky3.py | 4 +- .../kandinsky3/test_kandinsky3_img2img.py | 4 +- tests/pipelines/kolors/test_kolors.py | 2 +- tests/pipelines/kolors/test_kolors_img2img.py | 4 +- .../test_latent_consistency_models.py | 4 +- .../test_latent_consistency_models_img2img.py | 4 +- .../latent_diffusion/test_latent_diffusion.py | 4 +- .../test_latent_diffusion_superresolution.py | 3 +- tests/pipelines/latte/test_latte.py | 4 +- .../test_ledits_pp_stable_diffusion.py | 3 +- .../test_ledits_pp_stable_diffusion_xl.py | 2 +- tests/pipelines/ltx/test_ltx.py | 2 +- tests/pipelines/ltx/test_ltx_condition.py | 2 +- tests/pipelines/ltx/test_ltx_image2video.py | 2 +- .../pipelines/ltx/test_ltx_latent_upsample.py | 2 +- tests/pipelines/lumina/test_lumina_nextdit.py | 4 +- .../pipelines/marigold/test_marigold_depth.py | 4 +- .../marigold/test_marigold_intrinsics.py | 4 +- .../marigold/test_marigold_normals.py | 4 +- tests/pipelines/mochi/test_mochi.py | 4 +- .../omnigen/test_pipeline_omnigen.py | 4 +- tests/pipelines/pag/test_pag_animatediff.py | 2 +- tests/pipelines/pag/test_pag_controlnet_sd.py | 2 +- .../pag/test_pag_controlnet_sd_inpaint.py | 2 +- .../pipelines/pag/test_pag_controlnet_sdxl.py | 2 +- .../pag/test_pag_controlnet_sdxl_img2img.py | 2 +- tests/pipelines/pag/test_pag_hunyuan_dit.py | 2 +- tests/pipelines/pag/test_pag_kolors.py | 2 +- tests/pipelines/pag/test_pag_pixart_sigma.py | 4 +- tests/pipelines/pag/test_pag_sana.py | 2 +- tests/pipelines/pag/test_pag_sd.py | 4 +- tests/pipelines/pag/test_pag_sd3.py | 4 +- tests/pipelines/pag/test_pag_sd3_img2img.py | 4 +- tests/pipelines/pag/test_pag_sd_img2img.py | 4 +- tests/pipelines/pag/test_pag_sd_inpaint.py | 4 +- tests/pipelines/pag/test_pag_sdxl.py | 4 +- tests/pipelines/pag/test_pag_sdxl_img2img.py | 4 +- tests/pipelines/pag/test_pag_sdxl_inpaint.py | 4 +- tests/pipelines/pixart_alpha/test_pixart.py | 4 +- tests/pipelines/pixart_sigma/test_pixart.py | 4 +- tests/pipelines/pndm/test_pndm.py | 3 +- tests/pipelines/qwenimage/test_qwenimage.py | 2 +- .../qwenimage/test_qwenimage_edit.py | 2 +- .../qwenimage/test_qwenimage_img2img.py | 4 +- .../qwenimage/test_qwenimage_inpaint.py | 2 +- tests/pipelines/sana/test_sana.py | 4 +- tests/pipelines/sana/test_sana_controlnet.py | 6 +- tests/pipelines/sana/test_sana_sprint.py | 4 +- .../sana/test_sana_sprint_img2img.py | 6 +- tests/pipelines/shap_e/test_shap_e.py | 4 +- tests/pipelines/shap_e/test_shap_e_img2img.py | 4 +- .../pipelines/skyreels_v2/test_skyreels_v2.py | 4 +- .../skyreels_v2/test_skyreels_v2_df.py | 4 +- .../test_skyreels_v2_df_image_to_video.py | 2 +- .../test_skyreels_v2_df_video_to_video.py | 4 +- .../test_skyreels_v2_image_to_video.py | 2 +- .../stable_audio/test_stable_audio.py | 4 +- .../test_stable_cascade_combined.py | 2 +- .../test_stable_cascade_decoder.py | 6 +- .../test_stable_cascade_prior.py | 3 +- .../test_onnx_stable_diffusion.py | 2 +- 
.../test_onnx_stable_diffusion_img2img.py | 4 +- .../test_onnx_stable_diffusion_inpaint.py | 4 +- .../test_onnx_stable_diffusion_upscale.py | 4 +- .../stable_diffusion/test_stable_diffusion.py | 4 +- .../test_stable_diffusion_img2img.py | 4 +- .../test_stable_diffusion_inpaint.py | 4 +- ...st_stable_diffusion_instruction_pix2pix.py | 4 +- .../test_stable_diffusion.py | 4 +- .../test_stable_diffusion_depth.py | 4 +- .../test_stable_diffusion_inpaint.py | 4 +- .../test_stable_diffusion_latent_upscale.py | 4 +- .../test_stable_diffusion_upscale.py | 3 +- .../test_stable_diffusion_v_pred.py | 3 +- .../test_pipeline_stable_diffusion_3.py | 4 +- ...est_pipeline_stable_diffusion_3_img2img.py | 4 +- ...est_pipeline_stable_diffusion_3_inpaint.py | 4 +- .../test_stable_diffusion_adapter.py | 4 +- .../test_stable_diffusion_image_variation.py | 4 +- .../test_stable_diffusion_xl.py | 4 +- .../test_stable_diffusion_xl_adapter.py | 4 +- .../test_stable_diffusion_xl_img2img.py | 4 +- .../test_stable_diffusion_xl_inpaint.py | 4 +- ...stable_diffusion_xl_instruction_pix2pix.py | 2 +- .../stable_unclip/test_stable_unclip.py | 4 +- .../test_stable_unclip_img2img.py | 4 +- .../test_stable_video_diffusion.py | 4 +- tests/pipelines/test_pipeline_utils.py | 3 +- tests/pipelines/test_pipelines.py | 5 +- tests/pipelines/test_pipelines_auto.py | 3 +- tests/pipelines/test_pipelines_common.py | 26 +- tests/pipelines/test_pipelines_onnx_common.py | 2 +- .../test_pipeline_visualcloze_combined.py | 4 +- .../test_pipeline_visualcloze_generation.py | 4 +- tests/pipelines/wan/test_wan.py | 4 +- tests/pipelines/wan/test_wan_22.py | 4 +- .../wan/test_wan_22_image_to_video.py | 4 +- .../pipelines/wan/test_wan_image_to_video.py | 2 +- tests/pipelines/wan/test_wan_vace.py | 2 +- .../pipelines/wan/test_wan_video_to_video.py | 4 +- tests/quantization/bnb/test_4bit.py | 4 +- tests/quantization/bnb/test_mixed_int8.py | 4 +- tests/quantization/gguf/test_gguf.py | 4 +- tests/quantization/quanto/test_quanto.py | 3 +- .../test_pipeline_level_quantization.py | 3 +- .../quantization/test_torch_compile_utils.py | 3 +- tests/quantization/torchao/test_torchao.py | 4 +- tests/quantization/utils.py | 3 +- tests/remote/test_remote_decode.py | 5 +- tests/remote/test_remote_encode.py | 3 +- tests/schedulers/test_scheduler_dpm_sde.py | 2 +- tests/schedulers/test_scheduler_euler.py | 2 +- .../test_scheduler_euler_ancestral.py | 2 +- tests/schedulers/test_scheduler_heun.py | 2 +- .../test_scheduler_kdpm2_ancestral.py | 2 +- .../test_scheduler_kdpm2_discrete.py | 2 +- tests/schedulers/test_scheduler_lcm.py | 2 +- tests/schedulers/test_scheduler_lms.py | 2 +- tests/schedulers/test_scheduler_sasolver.py | 2 +- tests/schedulers/test_schedulers.py | 2 +- .../single_file/single_file_testing_utils.py | 3 +- tests/single_file/test_lumina2_transformer.py | 3 +- .../test_model_autoencoder_dc_single_file.py | 3 +- .../test_model_controlnet_single_file.py | 3 +- ...test_model_flux_transformer_single_file.py | 3 +- .../test_model_motion_adapter_single_file.py | 3 +- .../test_model_sd_cascade_unet_single_file.py | 3 +- .../single_file/test_model_vae_single_file.py | 3 +- .../test_model_wan_autoencoder_single_file.py | 3 +- ...est_model_wan_transformer3d_single_file.py | 3 +- tests/single_file/test_sana_transformer.py | 3 +- ...iffusion_controlnet_img2img_single_file.py | 4 +- ...iffusion_controlnet_inpaint_single_file.py | 4 +- ...stable_diffusion_controlnet_single_file.py | 4 +- ...st_stable_diffusion_img2img_single_file.py | 4 +- 
...st_stable_diffusion_inpaint_single_file.py | 4 +- .../test_stable_diffusion_single_file.py | 4 +- ...st_stable_diffusion_upscale_single_file.py | 4 +- ...stable_diffusion_xl_adapter_single_file.py | 4 +- ...ble_diffusion_xl_controlnet_single_file.py | 4 +- ...stable_diffusion_xl_img2img_single_file.py | 4 +- ...st_stable_diffusion_xl_instruct_pix2pix.py | 3 +- .../test_stable_diffusion_xl_single_file.py | 4 +- tests/testing_utils.py | 1557 +++++++++++++++++ 312 files changed, 2360 insertions(+), 554 deletions(-) create mode 100644 tests/hooks/__init__.py create mode 100644 tests/lora/__init__.py create mode 100644 tests/others/__init__.py create mode 100644 tests/pipelines/ip_adapters/__init__.py create mode 100644 tests/testing_utils.py diff --git a/examples/conftest.py b/examples/conftest.py index 9b8996430f..ff7543ba82 100644 --- a/examples/conftest.py +++ b/examples/conftest.py @@ -25,6 +25,11 @@ from os.path import abspath, dirname, join git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src")) sys.path.insert(1, git_repo_path) +# Add parent directory to path so we can import from tests +repo_root = abspath(dirname(dirname(__file__))) +if repo_root not in sys.path: + sys.path.insert(0, repo_root) + # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality @@ -32,13 +37,13 @@ warnings.simplefilter(action="ignore", category=FutureWarning) def pytest_addoption(parser): - from diffusers.utils.testing_utils import pytest_addoption_shared + from tests.testing_utils import pytest_addoption_shared pytest_addoption_shared(parser) def pytest_terminal_summary(terminalreporter): - from diffusers.utils.testing_utils import pytest_terminal_summary_main + from tests.testing_utils import pytest_terminal_summary_main make_reports = terminalreporter.config.getoption("--make-reports") if make_reports: diff --git a/examples/controlnet/train_controlnet_sd3.py b/examples/controlnet/train_controlnet_sd3.py index 20ef5c31b9..1d6fc57640 100644 --- a/examples/controlnet/train_controlnet_sd3.py +++ b/examples/controlnet/train_controlnet_sd3.py @@ -24,6 +24,8 @@ import math import os import random import shutil + +# Add repo root to path to import from tests from pathlib import Path import accelerate @@ -54,8 +56,7 @@ from diffusers.optimization import get_scheduler from diffusers.training_utils import compute_density_for_timestep_sampling, compute_loss_weighting_for_sd3, free_memory from diffusers.utils import check_min_version, is_wandb_available, make_image_grid from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card -from diffusers.utils.testing_utils import backend_empty_cache -from diffusers.utils.torch_utils import is_compiled_module +from diffusers.utils.torch_utils import backend_empty_cache, is_compiled_module if is_wandb_available(): diff --git a/examples/vqgan/test_vqgan.py b/examples/vqgan/test_vqgan.py index d13e102e78..a3c8ee1e84 100644 --- a/examples/vqgan/test_vqgan.py +++ b/examples/vqgan/test_vqgan.py @@ -24,12 +24,18 @@ import tempfile import torch from diffusers import VQModel -from diffusers.utils.testing_utils import require_timm +# Add parent directories to path to import from tests sys.path.append("..") +repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) +if repo_root not in sys.path: + sys.path.insert(0, repo_root) + from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 
+from tests.testing_utils import require_timm # noqa + logging.basicConfig(level=logging.DEBUG) diff --git a/src/diffusers/utils/testing_utils.py b/src/diffusers/utils/testing_utils.py index a0307c108a..6d6a7d6ce4 100644 --- a/src/diffusers/utils/testing_utils.py +++ b/src/diffusers/utils/testing_utils.py @@ -66,7 +66,10 @@ else: global_rng = random.Random() logger = get_logger(__name__) - +logger.warning( + "diffusers.utils.testing_utils' is deprecated and will be removed in a future version. " + "Determinism and device backend utilities have been moved to `diffusers.utils.torch_utils`. " +) _required_peft_version = is_peft_available() and version.parse( version.parse(importlib.metadata.version("peft")).base_version ) > version.parse("0.5") @@ -801,10 +804,9 @@ def export_to_ply(mesh, output_ply_path: str = None): f.write(format.pack(*vertex)) if faces is not None: - format = struct.Struct(" version.parse("0.5") +_required_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version +) > version.parse("4.33") + +USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version +BIG_GPU_MEMORY = int(os.getenv("BIG_GPU_MEMORY", 40)) + +if is_torch_available(): + import torch + + # Set a backend environment variable for any extra module import required for a custom accelerator + if "DIFFUSERS_TEST_BACKEND" in os.environ: + backend = os.environ["DIFFUSERS_TEST_BACKEND"] + try: + _ = importlib.import_module(backend) + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Failed to import `DIFFUSERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module \ + to enable a specified backend.):\n{e}" + ) from e + + if "DIFFUSERS_TEST_DEVICE" in os.environ: + torch_device = os.environ["DIFFUSERS_TEST_DEVICE"] + try: + # try creating device to see if provided device is valid + _ = torch.device(torch_device) + except RuntimeError as e: + raise RuntimeError( + f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}" + ) from e + logger.info(f"torch_device overrode to {torch_device}") + else: + if torch.cuda.is_available(): + torch_device = "cuda" + elif torch.xpu.is_available(): + torch_device = "xpu" + else: + torch_device = "cpu" + is_torch_higher_equal_than_1_12 = version.parse( + version.parse(torch.__version__).base_version + ) >= version.parse("1.12") + + if is_torch_higher_equal_than_1_12: + # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details + mps_backend_registered = hasattr(torch.backends, "mps") + torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device + + from diffusers.utils.torch_utils import get_torch_cuda_device_capability + + +def torch_all_close(a, b, *args, **kwargs): + if not is_torch_available(): + raise ValueError("PyTorch needs to be installed to use this function.") + if not torch.allclose(a, b, *args, **kwargs): + assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}." 
+ return True + + +def numpy_cosine_similarity_distance(a, b): + similarity = np.dot(a, b) / (norm(a) * norm(b)) + distance = 1.0 - similarity.mean() + + return distance + + +def check_if_dicts_are_equal(dict1, dict2): + dict1, dict2 = dict1.copy(), dict2.copy() + + for key, value in dict1.items(): + if isinstance(value, set): + dict1[key] = sorted(value) + for key, value in dict2.items(): + if isinstance(value, set): + dict2[key] = sorted(value) + + for key in dict1: + if key not in dict2: + return False + if dict1[key] != dict2[key]: + return False + + for key in dict2: + if key not in dict1: + return False + + return True + + +def print_tensor_test( + tensor, + limit_to_slices=None, + max_torch_print=None, + filename="test_corrections.txt", + expected_tensor_name="expected_slice", +): + if max_torch_print: + torch.set_printoptions(threshold=10_000) + + test_name = os.environ.get("PYTEST_CURRENT_TEST") + if not torch.is_tensor(tensor): + tensor = torch.from_numpy(tensor) + if limit_to_slices: + tensor = tensor[0, -3:, -3:, -1] + + tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "") + # format is usually: + # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161]) + output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array") + test_file, test_class, test_fn = test_name.split("::") + test_fn = test_fn.split()[0] + with open(filename, "a") as f: + print("::".join([test_file, test_class, test_fn, output_str]), file=f) + + +def get_tests_dir(append_path=None): + """ + Args: + append_path: optional path to append to the tests dir path + Return: + The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is + joined after the `tests` dir the former is provided. + """ + # this function caller's __file__ + caller__file__ = inspect.stack()[1][1] + tests_dir = os.path.abspath(os.path.dirname(caller__file__)) + + while not tests_dir.endswith("tests"): + tests_dir = os.path.dirname(tests_dir) + + if append_path: + return Path(tests_dir, append_path).as_posix() + else: + return tests_dir + + +# Taken from the following PR: +# https://github.com/huggingface/accelerate/pull/1964 +def str_to_bool(value) -> int: + """ + Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`, + `on`, and `1`; False value are `n`, `no`, `f`, `false`, `off`, and `0`; + """ + value = value.lower() + if value in ("y", "yes", "t", "true", "on", "1"): + return 1 + elif value in ("n", "no", "f", "false", "off", "0"): + return 0 + else: + raise ValueError(f"invalid truth value {value}") + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = str_to_bool(value) + except ValueError: + # More values are supported, but let's keep the message simple. 
+ raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) +_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False) +_run_compile_tests = parse_flag_from_env("RUN_COMPILE", default=False) + + +def floats_tensor(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor""" + if rng is None: + rng = global_rng + + total_dims = 1 + for dim in shape: + total_dims *= dim + + values = [] + for _ in range(total_dims): + values.append(rng.random() * scale) + + return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous() + + +def slow(test_case): + """ + Decorator marking a test as slow. + + Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) + + +def nightly(test_case): + """ + Decorator marking a test that runs nightly in the diffusers CI. + + Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case) + + +def is_torch_compile(test_case): + """ + Decorator marking a test that runs compile tests in the diffusers CI. + + Compile tests are skipped by default. Set the RUN_COMPILE environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case) + + +def require_torch(test_case): + """ + Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. + """ + return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case) + + +def require_torch_2(test_case): + """ + Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed. 
+ """ + return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")( + test_case + ) + + +def require_torch_version_greater_equal(torch_version): + """Decorator marking a test that requires torch with a specific version or greater.""" + + def decorator(test_case): + correct_torch_version = is_torch_available() and is_torch_version(">=", torch_version) + return unittest.skipUnless( + correct_torch_version, f"test requires torch with the version greater than or equal to {torch_version}" + )(test_case) + + return decorator + + +def require_torch_version_greater(torch_version): + """Decorator marking a test that requires torch with a specific version greater.""" + + def decorator(test_case): + correct_torch_version = is_torch_available() and is_torch_version(">", torch_version) + return unittest.skipUnless( + correct_torch_version, f"test requires torch with the version greater than {torch_version}" + )(test_case) + + return decorator + + +def require_torch_gpu(test_case): + """Decorator marking a test that requires CUDA and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")( + test_case + ) + + +def require_torch_cuda_compatibility(expected_compute_capability): + def decorator(test_case): + if torch.cuda.is_available(): + current_compute_capability = get_torch_cuda_device_capability() + return unittest.skipUnless( + float(current_compute_capability) == float(expected_compute_capability), + "Test not supported for this compute capability.", + ) + + return decorator + + +# These decorators are for accelerator-specific behaviours that are not GPU-specific +def require_torch_accelerator(test_case): + """Decorator marking a test that requires an accelerator backend and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")( + test_case + ) + + +def require_torch_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without + multiple GPUs. To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests + -k "multi_gpu" + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_torch_multi_accelerator(test_case): + """ + Decorator marking a test that requires a multi-accelerator setup (in PyTorch). These tests are skipped on a machine + without multiple hardware accelerators. 
+ """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless( + torch.cuda.device_count() > 1 or torch.xpu.device_count() > 1, "test requires multiple hardware accelerators" + )(test_case) + + +def require_torch_accelerator_with_fp16(test_case): + """Decorator marking a test that requires an accelerator with support for the FP16 data type.""" + return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")( + test_case + ) + + +def require_torch_accelerator_with_fp64(test_case): + """Decorator marking a test that requires an accelerator with support for the FP64 data type.""" + return unittest.skipUnless(_is_torch_fp64_available(torch_device), "test requires accelerator with fp64 support")( + test_case + ) + + +def require_big_gpu_with_torch_cuda(test_case): + """ + Decorator marking a test that requires a bigger GPU (24GB) for execution. Some example pipelines: Flux, SD3, Cog, + etc. + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + if not torch.cuda.is_available(): + return unittest.skip("test requires PyTorch CUDA")(test_case) + + device_properties = torch.cuda.get_device_properties(0) + total_memory = device_properties.total_memory / (1024**3) + return unittest.skipUnless( + total_memory >= BIG_GPU_MEMORY, f"test requires a GPU with at least {BIG_GPU_MEMORY} GB memory" + )(test_case) + + +def require_big_accelerator(test_case): + """ + Decorator marking a test that requires a bigger hardware accelerator (24GB) for execution. Some example pipelines: + Flux, SD3, Cog, etc. + """ + import pytest + + test_case = pytest.mark.big_accelerator(test_case) + + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + if not (torch.cuda.is_available() or torch.xpu.is_available()): + return unittest.skip("test requires PyTorch CUDA")(test_case) + + if torch.xpu.is_available(): + device_properties = torch.xpu.get_device_properties(0) + else: + device_properties = torch.cuda.get_device_properties(0) + + total_memory = device_properties.total_memory / (1024**3) + return unittest.skipUnless( + total_memory >= BIG_GPU_MEMORY, + f"test requires a hardware accelerator with at least {BIG_GPU_MEMORY} GB memory", + )(test_case) + + +def require_torch_accelerator_with_training(test_case): + """Decorator marking a test that requires an accelerator with support for training.""" + return unittest.skipUnless( + is_torch_available() and backend_supports_training(torch_device), + "test requires accelerator with training support", + )(test_case) + + +def skip_mps(test_case): + """Decorator marking a test to skip if torch_device is 'mps'""" + return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case) + + +def require_flax(test_case): + """ + Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed + """ + return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case) + + +def require_compel(test_case): + """ + Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when + the library is not installed. + """ + return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case) + + +def require_onnxruntime(test_case): + """ + Decorator marking a test that requires onnxruntime. 
These tests are skipped when onnxruntime isn't installed. + """ + return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case) + + +def require_note_seq(test_case): + """ + Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed. + """ + return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case) + + +def require_accelerator(test_case): + """ + Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no + hardware accelerator available. + """ + return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case) + + +def require_torchsde(test_case): + """ + Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed. + """ + return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case) + + +def require_peft_backend(test_case): + """ + Decorator marking a test that requires PEFT backend, this would require some specific versions of PEFT and + transformers. + """ + return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case) + + +def require_timm(test_case): + """ + Decorator marking a test that requires timm. These tests are skipped when timm isn't installed. + """ + return unittest.skipUnless(is_timm_available(), "test requires timm")(test_case) + + +def require_bitsandbytes(test_case): + """ + Decorator marking a test that requires bitsandbytes. These tests are skipped when bitsandbytes isn't installed. + """ + return unittest.skipUnless(is_bitsandbytes_available(), "test requires bitsandbytes")(test_case) + + +def require_quanto(test_case): + """ + Decorator marking a test that requires quanto. These tests are skipped when quanto isn't installed. + """ + return unittest.skipUnless(is_optimum_quanto_available(), "test requires quanto")(test_case) + + +def require_accelerate(test_case): + """ + Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed. + """ + return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case) + + +def require_peft_version_greater(peft_version): + """ + Decorator marking a test that requires PEFT backend with a specific version, this would require some specific + versions of PEFT and transformers. + """ + + def decorator(test_case): + correct_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version + ) > version.parse(peft_version) + return unittest.skipUnless( + correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}" + )(test_case) + + return decorator + + +def require_transformers_version_greater(transformers_version): + """ + Decorator marking a test that requires transformers with a specific version, this would require some specific + versions of PEFT and transformers. 
+ """ + + def decorator(test_case): + correct_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version + ) > version.parse(transformers_version) + return unittest.skipUnless( + correct_transformers_version, + f"test requires transformers with the version greater than {transformers_version}", + )(test_case) + + return decorator + + +def require_accelerate_version_greater(accelerate_version): + def decorator(test_case): + correct_accelerate_version = is_accelerate_available() and version.parse( + version.parse(importlib.metadata.version("accelerate")).base_version + ) > version.parse(accelerate_version) + return unittest.skipUnless( + correct_accelerate_version, f"Test requires accelerate with the version greater than {accelerate_version}." + )(test_case) + + return decorator + + +def require_bitsandbytes_version_greater(bnb_version): + def decorator(test_case): + correct_bnb_version = is_bitsandbytes_available() and version.parse( + version.parse(importlib.metadata.version("bitsandbytes")).base_version + ) > version.parse(bnb_version) + return unittest.skipUnless( + correct_bnb_version, f"Test requires bitsandbytes with the version greater than {bnb_version}." + )(test_case) + + return decorator + + +def require_hf_hub_version_greater(hf_hub_version): + def decorator(test_case): + correct_hf_hub_version = version.parse( + version.parse(importlib.metadata.version("huggingface_hub")).base_version + ) > version.parse(hf_hub_version) + return unittest.skipUnless( + correct_hf_hub_version, f"Test requires huggingface_hub with the version greater than {hf_hub_version}." + )(test_case) + + return decorator + + +def require_gguf_version_greater_or_equal(gguf_version): + def decorator(test_case): + correct_gguf_version = is_gguf_available() and version.parse( + version.parse(importlib.metadata.version("gguf")).base_version + ) >= version.parse(gguf_version) + return unittest.skipUnless( + correct_gguf_version, f"Test requires gguf with the version greater than {gguf_version}." + )(test_case) + + return decorator + + +def require_torchao_version_greater_or_equal(torchao_version): + def decorator(test_case): + correct_torchao_version = is_torchao_available() and version.parse( + version.parse(importlib.metadata.version("torchao")).base_version + ) >= version.parse(torchao_version) + return unittest.skipUnless( + correct_torchao_version, f"Test requires torchao with version greater than {torchao_version}." + )(test_case) + + return decorator + + +def require_kernels_version_greater_or_equal(kernels_version): + def decorator(test_case): + correct_kernels_version = is_kernels_available() and version.parse( + version.parse(importlib.metadata.version("kernels")).base_version + ) >= version.parse(kernels_version) + return unittest.skipUnless( + correct_kernels_version, f"Test requires kernels with version greater than {kernels_version}." 
+ )(test_case) + + return decorator + + +def deprecate_after_peft_backend(test_case): + """ + Decorator marking a test that will be skipped after PEFT backend + """ + return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case) + + +def get_python_version(): + sys_info = sys.version_info + major, minor = sys_info.major, sys_info.minor + return major, minor + + +def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray: + if isinstance(arry, str): + if local_path is not None: + # local_path can be passed to correct images of tests + return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix() + elif arry.startswith("http://") or arry.startswith("https://"): + response = requests.get(arry, timeout=DIFFUSERS_REQUEST_TIMEOUT) + response.raise_for_status() + arry = np.load(BytesIO(response.content)) + elif os.path.isfile(arry): + arry = np.load(arry) + else: + raise ValueError( + f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path" + ) + elif isinstance(arry, np.ndarray): + pass + else: + raise ValueError( + "Incorrect format used for numpy ndarray. Should be an url linking to an image, a local path, or a" + " ndarray." + ) + + return arry + + +def load_pt(url: str, map_location: Optional[str] = None, weights_only: Optional[bool] = True): + response = requests.get(url, timeout=DIFFUSERS_REQUEST_TIMEOUT) + response.raise_for_status() + arry = torch.load(BytesIO(response.content), map_location=map_location, weights_only=weights_only) + return arry + + +def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image: + """ + Loads `image` to a PIL Image. + + Args: + image (`str` or `PIL.Image.Image`): + The image to convert to the PIL Image format. + Returns: + `PIL.Image.Image`: + A PIL Image. + """ + if isinstance(image, str): + if image.startswith("http://") or image.startswith("https://"): + image = PIL.Image.open(requests.get(image, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw) + elif os.path.isfile(image): + image = PIL.Image.open(image) + else: + raise ValueError( + f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path" + ) + elif isinstance(image, PIL.Image.Image): + image = image + else: + raise ValueError( + "Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image." + ) + image = PIL.ImageOps.exif_transpose(image) + image = image.convert("RGB") + return image + + +def preprocess_image(image: PIL.Image, batch_size: int): + w, h = image.size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + image = image.resize((w, h), resample=PIL.Image.LANCZOS) + image = np.array(image).astype(np.float32) / 255.0 + image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) + image = torch.from_numpy(image) + return 2.0 * image - 1.0 + + +def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str: + if output_gif_path is None: + output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name + + image[0].save( + output_gif_path, + save_all=True, + append_images=image[1:], + optimize=False, + duration=100, + loop=0, + ) + return output_gif_path + + +@contextmanager +def buffered_writer(raw_f): + f = io.BufferedWriter(raw_f) + yield f + f.flush() + + +def export_to_ply(mesh, output_ply_path: str = None): + """ + Write a PLY file for a mesh. 
+ """ + if output_ply_path is None: + output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name + + coords = mesh.verts.detach().cpu().numpy() + faces = mesh.faces.cpu().numpy() + rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1) + + with buffered_writer(open(output_ply_path, "wb")) as f: + f.write(b"ply\n") + f.write(b"format binary_little_endian 1.0\n") + f.write(bytes(f"element vertex {len(coords)}\n", "ascii")) + f.write(b"property float x\n") + f.write(b"property float y\n") + f.write(b"property float z\n") + if rgb is not None: + f.write(b"property uchar red\n") + f.write(b"property uchar green\n") + f.write(b"property uchar blue\n") + if faces is not None: + f.write(bytes(f"element face {len(faces)}\n", "ascii")) + f.write(b"property list uchar int vertex_index\n") + f.write(b"end_header\n") + + if rgb is not None: + rgb = (rgb * 255.499).round().astype(int) + vertices = [ + (*coord, *rgb) + for coord, rgb in zip( + coords.tolist(), + rgb.tolist(), + ) + ] + format = struct.Struct("<3f3B") + for item in vertices: + f.write(format.pack(*item)) + else: + format = struct.Struct("<3f") + for vertex in coords.tolist(): + f.write(format.pack(*vertex)) + + if faces is not None: + format = struct.Struct(" str: + if is_opencv_available(): + import cv2 + else: + raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video")) + if output_video_path is None: + output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name + + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + h, w, c = video_frames[0].shape + video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h)) + for i in range(len(video_frames)): + img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR) + video_writer.write(img) + return output_video_path + + +def load_hf_numpy(path) -> np.ndarray: + base_url = "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main" + + if not path.startswith("http://") and not path.startswith("https://"): + path = os.path.join(base_url, urllib.parse.quote(path)) + + return load_numpy(path) + + +# --- pytest conf functions --- # + +# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once +pytest_opt_registered = {} + + +def pytest_addoption_shared(parser): + """ + This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there. + + It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest` + option. + + """ + option = "--make-reports" + if option not in pytest_opt_registered: + parser.addoption( + option, + action="store", + default=False, + help="generate report files. The value of this option is used as a prefix to report names", + ) + pytest_opt_registered[option] = 1 + + +def pytest_terminal_summary_main(tr, id): + """ + Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current + directory. The report files are prefixed with the test suite name. + + This function emulates --duration and -rA pytest arguments. + + This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined + there. + + Args: + - tr: `terminalreporter` passed from `conftest.py` + - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is + needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other. 
+ + NB: this functions taps into a private _pytest API and while unlikely, it could break should + pytest do internal changes - also it calls default internal methods of terminalreporter which + can be hijacked by various `pytest-` plugins and interfere. + + """ + from _pytest.config import create_terminal_writer + + if not len(id): + id = "tests" + + config = tr.config + orig_writer = config.get_terminal_writer() + orig_tbstyle = config.option.tbstyle + orig_reportchars = tr.reportchars + + dir = "reports" + Path(dir).mkdir(parents=True, exist_ok=True) + report_files = { + k: f"{dir}/{id}_{k}.txt" + for k in [ + "durations", + "errors", + "failures_long", + "failures_short", + "failures_line", + "passes", + "stats", + "summary_short", + "warnings", + ] + } + + # custom durations report + # note: there is no need to call pytest --durations=XX to get this separate report + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66 + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if dlist: + dlist.sort(key=lambda x: x.duration, reverse=True) + with open(report_files["durations"], "w") as f: + durations_min = 0.05 # sec + f.write("slowest durations\n") + for i, rep in enumerate(dlist): + if rep.duration < durations_min: + f.write(f"{len(dlist) - i} durations < {durations_min} secs were omitted") + break + f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n") + + def summary_failures_short(tr): + # expecting that the reports were --tb=long (default) so we chop them off here to the last frame + reports = tr.getreports("failed") + if not reports: + return + tr.write_sep("=", "FAILURES SHORT STACK") + for rep in reports: + msg = tr._getfailureheadline(rep) + tr.write_sep("_", msg, red=True, bold=True) + # chop off the optional leading extra frames, leaving only the last one + longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S) + tr._tw.line(longrepr) + # note: not printing out any rep.sections to keep the report short + + # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814 + # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g. 
+ # pytest-instafail does that) + + # report failures with line/short/long styles + config.option.tbstyle = "auto" # full tb + with open(report_files["failures_long"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + # config.option.tbstyle = "short" # short tb + with open(report_files["failures_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + summary_failures_short(tr) + + config.option.tbstyle = "line" # one line per error + with open(report_files["failures_line"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + with open(report_files["errors"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_errors() + + with open(report_files["warnings"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_warnings() # normal warnings + tr.summary_warnings() # final warnings + + tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary()) + with open(report_files["passes"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_passes() + + with open(report_files["summary_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.short_test_summary() + + with open(report_files["stats"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_stats() + + # restore: + tr._tw = orig_writer + tr.reportchars = orig_reportchars + config.option.tbstyle = orig_tbstyle + + +# Adapted from https://github.com/huggingface/transformers/blob/000e52aec8850d3fe2f360adc6fd256e5b47fe4c/src/transformers..testing_utils.py#L1905 +def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None): + """ + To decorate flaky tests (methods or entire classes). They will be retried on failures. + + Args: + max_attempts (`int`, *optional*, defaults to 5): + The maximum number of attempts to retry the flaky test. + wait_before_retry (`float`, *optional*): + If provided, will wait that number of seconds before retrying the test. + description (`str`, *optional*): + A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors, + etc.) + """ + + def decorator(obj): + # If decorating a class, wrap each test method on it + if inspect.isclass(obj): + for attr_name, attr_value in list(obj.__dict__.items()): + if callable(attr_value) and attr_name.startswith("test"): + # recursively decorate the method + setattr(obj, attr_name, decorator(attr_value)) + return obj + + # Otherwise we're decorating a single test function / method + @functools.wraps(obj) + def wrapper(*args, **kwargs): + retry_count = 1 + while retry_count < max_attempts: + try: + return obj(*args, **kwargs) + except Exception as err: + msg = ( + f"[FLAKY] {description or obj.__name__!r} " + f"failed on attempt {retry_count}/{max_attempts}: {err}" + ) + print(msg, file=sys.stderr) + if wait_before_retry is not None: + time.sleep(wait_before_retry) + retry_count += 1 + + return obj(*args, **kwargs) + + return wrapper + + return decorator + + +# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers..testing_utils.py#L1787 +def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None): + """ + To run a test in a subprocess. In particular, this can avoid (GPU) memory issue. + + Args: + test_case (`unittest.TestCase`): + The test that will run `target_func`. 
+ target_func (`Callable`): + The function implementing the actual testing logic. + inputs (`dict`, *optional*, defaults to `None`): + The inputs that will be passed to `target_func` through an (input) queue. + timeout (`int`, *optional*, defaults to `None`): + The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env. + variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`. + """ + if timeout is None: + timeout = int(os.environ.get("PYTEST_TIMEOUT", 600)) + + start_methohd = "spawn" + ctx = multiprocessing.get_context(start_methohd) + + input_queue = ctx.Queue(1) + output_queue = ctx.JoinableQueue(1) + + # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle. + input_queue.put(inputs, timeout=timeout) + + process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout)) + process.start() + # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents + # the test to exit properly. + try: + results = output_queue.get(timeout=timeout) + output_queue.task_done() + except Exception as e: + process.terminate() + test_case.fail(e) + process.join(timeout=timeout) + + if results["error"] is not None: + test_case.fail(f"{results['error']}") + + +class CaptureLogger: + """ + Args: + Context manager to capture `logging` streams + logger: 'logging` logger object + Returns: + The captured output is available via `self.out` + Example: + ```python + >>> from diffusers import logging + >>> from diffusers..testing_utils import CaptureLogger + + >>> msg = "Testing 1, 2, 3" + >>> logging.set_verbosity_info() + >>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py") + >>> with CaptureLogger(logger) as cl: + ... logger.info(msg) + >>> assert cl.out, msg + "\n" + ``` + """ + + def __init__(self, logger): + self.logger = logger + self.io = StringIO() + self.sh = logging.StreamHandler(self.io) + self.out = "" + + def __enter__(self): + self.logger.addHandler(self.sh) + return self + + def __exit__(self, *exc): + self.logger.removeHandler(self.sh) + self.out = self.io.getvalue() + + def __repr__(self): + return f"captured: {self.out}\n" + + +def enable_full_determinism(): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + """ + # Enable PyTorch deterministic mode. 
This potentially requires either the environment + # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, + # depending on the CUDA version, so we set them both here + os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" + torch.use_deterministic_algorithms(True) + + # Enable CUDNN deterministic mode + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.backends.cuda.matmul.allow_tf32 = False + + +def disable_full_determinism(): + os.environ["CUDA_LAUNCH_BLOCKING"] = "0" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = "" + torch.use_deterministic_algorithms(False) + + +# Utils for custom and alternative accelerator devices +def _is_torch_fp16_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float16).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp16', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +def _is_torch_fp64_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float64).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp64', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +# Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch +if is_torch_available(): + # Behaviour flags + BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True} + + # Function definitions + BACKEND_EMPTY_CACHE = { + "cuda": torch.cuda.empty_cache, + "xpu": torch.xpu.empty_cache, + "cpu": None, + "mps": torch.mps.empty_cache, + "default": None, + } + BACKEND_DEVICE_COUNT = { + "cuda": torch.cuda.device_count, + "xpu": torch.xpu.device_count, + "cpu": lambda: 0, + "mps": lambda: 0, + "default": 0, + } + BACKEND_MANUAL_SEED = { + "cuda": torch.cuda.manual_seed, + "xpu": torch.xpu.manual_seed, + "cpu": torch.manual_seed, + "mps": torch.mps.manual_seed, + "default": torch.manual_seed, + } + BACKEND_RESET_PEAK_MEMORY_STATS = { + "cuda": torch.cuda.reset_peak_memory_stats, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_RESET_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.reset_max_memory_allocated, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.max_memory_allocated, + "xpu": getattr(torch.xpu, "max_memory_allocated", None), + "cpu": 0, + "mps": 0, + "default": 0, + } + BACKEND_SYNCHRONIZE = { + "cuda": torch.cuda.synchronize, + "xpu": getattr(torch.xpu, "synchronize", None), + "cpu": None, + "mps": None, + "default": None, + } + + +# This dispatches a defined function according to the accelerator from the function definitions. 
+def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs): + if device not in dispatch_table: + return dispatch_table["default"](*args, **kwargs) + + fn = dispatch_table[device] + + # Some device agnostic functions return values. Need to guard against 'None' instead at + # user level + if not callable(fn): + return fn + + return fn(*args, **kwargs) + + +# These are callables which automatically dispatch the function specific to the accelerator +def backend_manual_seed(device: str, seed: int): + return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed) + + +def backend_synchronize(device: str): + return _device_agnostic_dispatch(device, BACKEND_SYNCHRONIZE) + + +def backend_empty_cache(device: str): + return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE) + + +def backend_device_count(device: str): + return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT) + + +def backend_reset_peak_memory_stats(device: str): + return _device_agnostic_dispatch(device, BACKEND_RESET_PEAK_MEMORY_STATS) + + +def backend_reset_max_memory_allocated(device: str): + return _device_agnostic_dispatch(device, BACKEND_RESET_MAX_MEMORY_ALLOCATED) + + +def backend_max_memory_allocated(device: str): + return _device_agnostic_dispatch(device, BACKEND_MAX_MEMORY_ALLOCATED) + + +# These are callables which return boolean behaviour flags and can be used to specify some +# device agnostic alternative where the feature is unsupported. +def backend_supports_training(device: str): + if not is_torch_available(): + return False + + if device not in BACKEND_SUPPORTS_TRAINING: + device = "default" + + return BACKEND_SUPPORTS_TRAINING[device] + + +# Guard for when Torch is not available +if is_torch_available(): + # Update device function dict mapping + def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str): + try: + # Try to import the function directly + spec_fn = getattr(device_spec_module, attribute_name) + device_fn_dict[torch_device] = spec_fn + except AttributeError as e: + # If the function doesn't exist, and there is no default, throw an error + if "default" not in device_fn_dict: + raise AttributeError( + f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found." + ) from e + + if "DIFFUSERS_TEST_DEVICE_SPEC" in os.environ: + device_spec_path = os.environ["DIFFUSERS_TEST_DEVICE_SPEC"] + if not Path(device_spec_path).is_file(): + raise ValueError(f"Specified path to device specification file is not found. Received {device_spec_path}") + + try: + import_name = device_spec_path[: device_spec_path.index(".py")] + except ValueError as e: + raise ValueError(f"Provided device spec file is not a Python file! Received {device_spec_path}") from e + + device_spec_module = importlib.import_module(import_name) + + try: + device_name = device_spec_module.DEVICE_NAME + except AttributeError: + raise AttributeError("Device spec file did not contain `DEVICE_NAME`") + + if "DIFFUSERS_TEST_DEVICE" in os.environ and torch_device != device_name: + msg = f"Mismatch between environment variable `DIFFUSERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n" + msg += "Either unset `DIFFUSERS_TEST_DEVICE` or ensure it matches device spec name." + raise ValueError(msg) + + torch_device = device_name + + # Add one entry here for each `BACKEND_*` dictionary. 
+ update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN") + update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN") + update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN") + update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING") + update_mapping_from_spec(BACKEND_RESET_PEAK_MEMORY_STATS, "RESET_PEAK_MEMORY_STATS_FN") + update_mapping_from_spec(BACKEND_RESET_MAX_MEMORY_ALLOCATED, "RESET_MAX_MEMORY_ALLOCATED_FN") + update_mapping_from_spec(BACKEND_MAX_MEMORY_ALLOCATED, "MAX_MEMORY_ALLOCATED_FN") + + +# Modified from https://github.com/huggingface/transformers/blob/cdfb018d0300fef3b07d9220f3efe9c2a9974662/src/transformers..testing_utils.py#L3090 + +# Type definition of key used in `Expectations` class. +DeviceProperties = Tuple[Union[str, None], Union[int, None]] + + +@functools.lru_cache +def get_device_properties() -> DeviceProperties: + """ + Get environment device properties. + """ + if IS_CUDA_SYSTEM or IS_ROCM_SYSTEM: + import torch + + major, _ = torch.cuda.get_device_capability() + if IS_ROCM_SYSTEM: + return ("rocm", major) + else: + return ("cuda", major) + elif IS_XPU_SYSTEM: + import torch + + # To get more info of the architecture meaning and bit allocation, refer to https://github.com/intel/llvm/blob/sycl/sycl/include/sycl/ext/oneapi/experimental/device_architecture.def + arch = torch.xpu.get_device_capability()["architecture"] + gen_mask = 0x000000FF00000000 + gen = (arch & gen_mask) >> 32 + return ("xpu", gen) + else: + return (torch_device, None) + + +if TYPE_CHECKING: + DevicePropertiesUserDict = UserDict[DeviceProperties, Any] +else: + DevicePropertiesUserDict = UserDict + +if is_torch_available(): + from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS + from diffusers.hooks.group_offloading import ( + _GROUP_ID_LAZY_LEAF, + _compute_group_hash, + _find_parent_module_in_module_dict, + _gather_buffers_with_no_group_offloading_parent, + _gather_parameters_with_no_group_offloading_parent, + ) + + def _get_expected_safetensors_files( + module: torch.nn.Module, + offload_to_disk_path: str, + offload_type: str, + num_blocks_per_group: Optional[int] = None, + ) -> Set[str]: + expected_files = set() + + def get_hashed_filename(group_id: str) -> str: + short_hash = _compute_group_hash(group_id) + return os.path.join(offload_to_disk_path, f"group_{short_hash}.safetensors") + + if offload_type == "block_level": + if num_blocks_per_group is None: + raise ValueError("num_blocks_per_group must be provided for 'block_level' offloading.") + + # Handle groups of ModuleList and Sequential blocks + unmatched_modules = [] + for name, submodule in module.named_children(): + if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)): + unmatched_modules.append(module) + continue + + for i in range(0, len(submodule), num_blocks_per_group): + current_modules = submodule[i : i + num_blocks_per_group] + if not current_modules: + continue + group_id = f"{name}_{i}_{i + len(current_modules) - 1}" + expected_files.add(get_hashed_filename(group_id)) + + # Handle the group for unmatched top-level modules and parameters + for module in unmatched_modules: + expected_files.add(get_hashed_filename(f"{module.__class__.__name__}_unmatched_group")) + + elif offload_type == "leaf_level": + # Handle leaf-level module groups + for name, submodule in module.named_modules(): + if isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS): + # These groups will always have parameters, so a file is expected + 
expected_files.add(get_hashed_filename(name)) + + # Handle groups for non-leaf parameters/buffers + modules_with_group_offloading = { + name for name, sm in module.named_modules() if isinstance(sm, _GO_LC_SUPPORTED_PYTORCH_LAYERS) + } + parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading) + buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading) + + all_orphans = parameters + buffers + if all_orphans: + parent_to_tensors = {} + module_dict = dict(module.named_modules()) + for tensor_name, _ in all_orphans: + parent_name = _find_parent_module_in_module_dict(tensor_name, module_dict) + if parent_name not in parent_to_tensors: + parent_to_tensors[parent_name] = [] + parent_to_tensors[parent_name].append(tensor_name) + + for parent_name in parent_to_tensors: + # A file is expected for each parent that gathers orphaned tensors + expected_files.add(get_hashed_filename(parent_name)) + expected_files.add(get_hashed_filename(_GROUP_ID_LAZY_LEAF)) + + else: + raise ValueError(f"Unsupported offload_type: {offload_type}") + + return expected_files + + def _check_safetensors_serialization( + module: torch.nn.Module, + offload_to_disk_path: str, + offload_type: str, + num_blocks_per_group: Optional[int] = None, + ) -> bool: + if not os.path.isdir(offload_to_disk_path): + return False, None, None + + expected_files = _get_expected_safetensors_files( + module, offload_to_disk_path, offload_type, num_blocks_per_group + ) + actual_files = set(glob.glob(os.path.join(offload_to_disk_path, "*.safetensors"))) + missing_files = expected_files - actual_files + extra_files = actual_files - expected_files + + is_correct = not missing_files and not extra_files + return is_correct, extra_files, missing_files + + +class Expectations(DevicePropertiesUserDict): + def get_expectation(self) -> Any: + """ + Find best matching expectation based on environment device properties. + """ + return self.find_expectation(get_device_properties()) + + @staticmethod + def is_default(key: DeviceProperties) -> bool: + return all(p is None for p in key) + + @staticmethod + def score(key: DeviceProperties, other: DeviceProperties) -> int: + """ + Returns score indicating how similar two instances of the `Properties` tuple are. Points are calculated using + bits, but documented as int. Rules are as follows: + * Matching `type` gives 8 points. + * Semi-matching `type`, for example cuda and rocm, gives 4 points. + * Matching `major` (compute capability major version) gives 2 points. + * Default expectation (if present) gives 1 points. + """ + (device_type, major) = key + (other_device_type, other_major) = other + + score = 0b0 + if device_type == other_device_type: + score |= 0b1000 + elif device_type in ["cuda", "rocm"] and other_device_type in ["cuda", "rocm"]: + score |= 0b100 + + if major == other_major and other_major is not None: + score |= 0b10 + + if Expectations.is_default(other): + score |= 0b1 + + return int(score) + + def find_expectation(self, key: DeviceProperties = (None, None)) -> Any: + """ + Find best matching expectation based on provided device properties. 
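        Example (an illustrative sketch; the numeric values are made up):

            expectations = Expectations(
                {
                    ("cuda", 8): 0.042,   # exact type + major match -> score 0b1010
                    ("cuda", None): 0.05, # type match only          -> score 0b1000
                    (None, None): 0.06,   # default fallback         -> score 0b0001
                }
            )
            expectations.find_expectation(("cuda", 8))  # returns 0.042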
+ """ + (result_key, result) = max(self.data.items(), key=lambda x: Expectations.score(key, x[0])) + + if Expectations.score(key, result_key) == 0: + raise ValueError(f"No matching expectation found for {key}") + + return result + + def __repr__(self): + return f"{self.data}" From b2da59b197306a49d93db2a28247de9b0f187435 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 28 Aug 2025 16:24:32 +0200 Subject: [PATCH 33/74] [Modular] Provide option to disable custom code loading globally via env variable (#12177) * update * update * update * update --- .../modular_pipelines/modular_pipeline.py | 2 +- src/diffusers/utils/constants.py | 1 + src/diffusers/utils/dynamic_modules_utils.py | 61 ++++++------------- 3 files changed, 19 insertions(+), 45 deletions(-) diff --git a/src/diffusers/modular_pipelines/modular_pipeline.py b/src/diffusers/modular_pipelines/modular_pipeline.py index c53fa81d56..35b9ac24c9 100644 --- a/src/diffusers/modular_pipelines/modular_pipeline.py +++ b/src/diffusers/modular_pipelines/modular_pipeline.py @@ -299,7 +299,7 @@ class ModularPipelineBlocks(ConfigMixin, PushToHubMixin): def from_pretrained( cls, pretrained_model_name_or_path: str, - trust_remote_code: Optional[bool] = None, + trust_remote_code: bool = False, **kwargs, ): hub_kwargs_names = [ diff --git a/src/diffusers/utils/constants.py b/src/diffusers/utils/constants.py index 2d9e16f87e..d9867fb875 100644 --- a/src/diffusers/utils/constants.py +++ b/src/diffusers/utils/constants.py @@ -45,6 +45,7 @@ DIFFUSERS_ATTN_BACKEND = os.getenv("DIFFUSERS_ATTN_BACKEND", "native") DIFFUSERS_ATTN_CHECKS = os.getenv("DIFFUSERS_ATTN_CHECKS", "0") in ENV_VARS_TRUE_VALUES DEFAULT_HF_PARALLEL_LOADING_WORKERS = 8 HF_ENABLE_PARALLEL_LOADING = os.environ.get("HF_ENABLE_PARALLEL_LOADING", "").upper() in ENV_VARS_TRUE_VALUES +DIFFUSERS_DISABLE_REMOTE_CODE = os.getenv("DIFFUSERS_DISABLE_REMOTE_CODE", "false").lower() in ENV_VARS_TRUE_VALUES # Below should be `True` if the current version of `peft` and `transformers` are compatible with # PEFT backend. Will automatically fall back to PEFT backend if the correct versions of the libraries are diff --git a/src/diffusers/utils/dynamic_modules_utils.py b/src/diffusers/utils/dynamic_modules_utils.py index 74ed240bf0..674eb65773 100644 --- a/src/diffusers/utils/dynamic_modules_utils.py +++ b/src/diffusers/utils/dynamic_modules_utils.py @@ -20,7 +20,6 @@ import json import os import re import shutil -import signal import sys import threading from pathlib import Path @@ -34,6 +33,7 @@ from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging +from .constants import DIFFUSERS_DISABLE_REMOTE_CODE logger = logging.get_logger(__name__) # pylint: disable=invalid-name @@ -159,52 +159,25 @@ def check_imports(filename): return get_relative_imports(filename) -def _raise_timeout_error(signum, frame): - raise ValueError( - "Loading this model requires you to execute custom code contained in the model repository on your local " - "machine. Please set the option `trust_remote_code=True` to permit loading of this model." 
- ) - - def resolve_trust_remote_code(trust_remote_code, model_name, has_remote_code): - if trust_remote_code is None: - if has_remote_code and TIME_OUT_REMOTE_CODE > 0: - prev_sig_handler = None - try: - prev_sig_handler = signal.signal(signal.SIGALRM, _raise_timeout_error) - signal.alarm(TIME_OUT_REMOTE_CODE) - while trust_remote_code is None: - answer = input( - f"The repository for {model_name} contains custom code which must be executed to correctly " - f"load the model. You can inspect the repository content at https://hf.co/{model_name}.\n" - f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n" - f"Do you wish to run the custom code? [y/N] " - ) - if answer.lower() in ["yes", "y", "1"]: - trust_remote_code = True - elif answer.lower() in ["no", "n", "0", ""]: - trust_remote_code = False - signal.alarm(0) - except Exception: - # OS which does not support signal.SIGALRM - raise ValueError( - f"The repository for {model_name} contains custom code which must be executed to correctly " - f"load the model. You can inspect the repository content at https://hf.co/{model_name}.\n" - f"Please pass the argument `trust_remote_code=True` to allow custom code to be run." - ) - finally: - if prev_sig_handler is not None: - signal.signal(signal.SIGALRM, prev_sig_handler) - signal.alarm(0) - elif has_remote_code: - # For the CI which puts the timeout at 0 - _raise_timeout_error(None, None) + trust_remote_code = trust_remote_code and not DIFFUSERS_DISABLE_REMOTE_CODE + if DIFFUSERS_DISABLE_REMOTE_CODE: + logger.warning( + "Downloading remote code is disabled globally via the DIFFUSERS_DISABLE_REMOTE_CODE environment variable. Ignoring `trust_remote_code`." + ) if has_remote_code and not trust_remote_code: - raise ValueError( - f"Loading {model_name} requires you to execute the configuration file in that" - " repo on your local machine. Make sure you have read the code there to avoid malicious use, then" - " set the option `trust_remote_code=True` to remove this error." + error_msg = f"The repository for {model_name} contains custom code. " + error_msg += ( + "Downloading remote code is disabled globally via the DIFFUSERS_DISABLE_REMOTE_CODE environment variable." + if DIFFUSERS_DISABLE_REMOTE_CODE + else "Pass `trust_remote_code=True` to allow loading remote code modules." + ) + raise ValueError(error_msg) + + elif has_remote_code and trust_remote_code: + logger.warning( + f"`trust_remote_code` is enabled. Downloading code from {model_name}. 
Please ensure you trust the contents of this repository" ) return trust_remote_code From ba0e732eb059b9eb3afe4b643be471d93154d1cc Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 28 Aug 2025 16:25:02 +0200 Subject: [PATCH 34/74] [Modular] Consolidate `load_default_components` into `load_components` (#12217) * update * Apply style fixes * update * update --------- Co-authored-by: github-actions[bot] --- .../modular_diffusers/components_manager.md | 6 +-- docs/source/en/modular_diffusers/guiders.md | 6 +-- .../en/modular_diffusers/modular_pipeline.md | 14 +++---- .../source/en/modular_diffusers/quickstart.md | 18 ++++----- .../modular_diffusers/components_manager.md | 6 +-- docs/source/zh/modular_diffusers/guiders.md | 6 +-- .../zh/modular_diffusers/modular_pipeline.md | 12 +++--- .../source/zh/modular_diffusers/quickstart.md | 14 +++---- .../modular_pipelines/modular_pipeline.py | 37 ++++++++----------- ...st_modular_pipeline_stable_diffusion_xl.py | 4 +- .../test_modular_pipelines_common.py | 2 +- 11 files changed, 59 insertions(+), 66 deletions(-) diff --git a/docs/source/en/modular_diffusers/components_manager.md b/docs/source/en/modular_diffusers/components_manager.md index 50fa140724..af53411b95 100644 --- a/docs/source/en/modular_diffusers/components_manager.md +++ b/docs/source/en/modular_diffusers/components_manager.md @@ -51,10 +51,10 @@ t2i_pipeline = t2i_blocks.init_pipeline(modular_repo_id, components_manager=comp -Components are only loaded and registered when using [`~ModularPipeline.load_components`] or [`~ModularPipeline.load_default_components`]. The example below uses [`~ModularPipeline.load_default_components`] to create a second pipeline that reuses all the components from the first one, and assigns it to a different collection +Components are only loaded and registered when using [`~ModularPipeline.load_components`] or [`~ModularPipeline.load_components`]. The example below uses [`~ModularPipeline.load_components`] to create a second pipeline that reuses all the components from the first one, and assigns it to a different collection ```py -pipe.load_default_components() +pipe.load_components() pipe2 = ModularPipeline.from_pretrained("YiYiXu/modular-demo-auto", components_manager=comp, collection="test2") ``` @@ -187,4 +187,4 @@ comp.enable_auto_cpu_offload(device="cuda") All models begin on the CPU and [`ComponentsManager`] moves them to the appropriate device right before they're needed, and moves other models back to the CPU when GPU memory is low. -You can set your own rules for which models to offload first. \ No newline at end of file +You can set your own rules for which models to offload first. diff --git a/docs/source/en/modular_diffusers/guiders.md b/docs/source/en/modular_diffusers/guiders.md index ddf5eb703f..fd0d278442 100644 --- a/docs/source/en/modular_diffusers/guiders.md +++ b/docs/source/en/modular_diffusers/guiders.md @@ -75,13 +75,13 @@ Guiders that are already saved on the Hub with a `modular_model_index.json` file } ``` -The guider is only created after calling [`~ModularPipeline.load_default_components`] based on the loading specification in `modular_model_index.json`. +The guider is only created after calling [`~ModularPipeline.load_components`] based on the loading specification in `modular_model_index.json`. 
```py t2i_pipeline = t2i_blocks.init_pipeline("YiYiXu/modular-doc-guider") # not created during init assert t2i_pipeline.guider is None -t2i_pipeline.load_default_components() +t2i_pipeline.load_components() # loaded as PAG guider t2i_pipeline.guider ``` @@ -172,4 +172,4 @@ t2i_pipeline.push_to_hub("YiYiXu/modular-doc-guider") ```
-
\ No newline at end of file + diff --git a/docs/source/en/modular_diffusers/modular_pipeline.md b/docs/source/en/modular_diffusers/modular_pipeline.md index 5bdef66a70..0e0a7bd75d 100644 --- a/docs/source/en/modular_diffusers/modular_pipeline.md +++ b/docs/source/en/modular_diffusers/modular_pipeline.md @@ -29,7 +29,7 @@ blocks = SequentialPipelineBlocks.from_blocks_dict(TEXT2IMAGE_BLOCKS) modular_repo_id = "YiYiXu/modular-loader-t2i-0704" pipeline = blocks.init_pipeline(modular_repo_id) -pipeline.load_default_components(torch_dtype=torch.float16) +pipeline.load_components(torch_dtype=torch.float16) pipeline.to("cuda") image = pipeline(prompt="Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", output="images")[0] @@ -49,7 +49,7 @@ blocks = SequentialPipelineBlocks.from_blocks_dict(IMAGE2IMAGE_BLOCKS) modular_repo_id = "YiYiXu/modular-loader-t2i-0704" pipeline = blocks.init_pipeline(modular_repo_id) -pipeline.load_default_components(torch_dtype=torch.float16) +pipeline.load_components(torch_dtype=torch.float16) pipeline.to("cuda") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png" @@ -73,7 +73,7 @@ blocks = SequentialPipelineBlocks.from_blocks_dict(INPAINT_BLOCKS) modular_repo_id = "YiYiXu/modular-loader-t2i-0704" pipeline = blocks.init_pipeline(modular_repo_id) -pipeline.load_default_components(torch_dtype=torch.float16) +pipeline.load_components(torch_dtype=torch.float16) pipeline.to("cuda") img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png" @@ -176,15 +176,15 @@ diffdiff_pipeline = ModularPipeline.from_pretrained(modular_repo_id, trust_remot ## Loading components -A [`ModularPipeline`] doesn't automatically instantiate with components. It only loads the configuration and component specifications. You can load all components with [`~ModularPipeline.load_default_components`] or only load specific components with [`~ModularPipeline.load_components`]. +A [`ModularPipeline`] doesn't automatically instantiate with components. It only loads the configuration and component specifications. You can load all components with [`~ModularPipeline.load_components`] or only load specific components with [`~ModularPipeline.load_components`]. - + ```py import torch -t2i_pipeline.load_default_components(torch_dtype=torch.float16) +t2i_pipeline.load_components(torch_dtype=torch.float16) t2i_pipeline.to("cuda") ``` @@ -355,4 +355,4 @@ The [config.json](https://huggingface.co/YiYiXu/modular-diffdiff-0704/blob/main/ "ModularPipelineBlocks": "block.DiffDiffBlocks" } } -``` \ No newline at end of file +``` diff --git a/docs/source/en/modular_diffusers/quickstart.md b/docs/source/en/modular_diffusers/quickstart.md index 9898c103f7..9d4eaa0c0c 100644 --- a/docs/source/en/modular_diffusers/quickstart.md +++ b/docs/source/en/modular_diffusers/quickstart.md @@ -173,9 +173,9 @@ print(dd_blocks) ## ModularPipeline -Convert the [`SequentialPipelineBlocks`] into a [`ModularPipeline`] with the [`ModularPipeline.init_pipeline`] method. This initializes the expected components to load from a `modular_model_index.json` file. Explicitly load the components by calling [`ModularPipeline.load_default_components`]. +Convert the [`SequentialPipelineBlocks`] into a [`ModularPipeline`] with the [`ModularPipeline.init_pipeline`] method. This initializes the expected components to load from a `modular_model_index.json` file. 
Explicitly load the components by calling [`ModularPipeline.load_components`]. -It is a good idea to initialize the [`ComponentManager`] with the pipeline to help manage the different components. Once you call [`~ModularPipeline.load_default_components`], the components are registered to the [`ComponentManager`] and can be shared between workflows. The example below uses the `collection` argument to assign the components a `"diffdiff"` label for better organization. +It is a good idea to initialize the [`ComponentManager`] with the pipeline to help manage the different components. Once you call [`~ModularPipeline.load_components`], the components are registered to the [`ComponentManager`] and can be shared between workflows. The example below uses the `collection` argument to assign the components a `"diffdiff"` label for better organization. ```py from diffusers.modular_pipelines import ComponentsManager @@ -209,11 +209,11 @@ Use the [`sub_blocks.insert`] method to insert it into the [`ModularPipeline`]. dd_blocks.sub_blocks.insert("ip_adapter", ip_adapter_block, 0) ``` -Call [`~ModularPipeline.init_pipeline`] to initialize a [`ModularPipeline`] and use [`~ModularPipeline.load_default_components`] to load the model components. Load and set the IP-Adapter to run the pipeline. +Call [`~ModularPipeline.init_pipeline`] to initialize a [`ModularPipeline`] and use [`~ModularPipeline.load_components`] to load the model components. Load and set the IP-Adapter to run the pipeline. ```py dd_pipeline = dd_blocks.init_pipeline("YiYiXu/modular-demo-auto", collection="diffdiff") -dd_pipeline.load_default_components(torch_dtype=torch.float16) +dd_pipeline.load_components(torch_dtype=torch.float16) dd_pipeline.loader.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") dd_pipeline.loader.set_ip_adapter_scale(0.6) dd_pipeline = dd_pipeline.to(device) @@ -260,14 +260,14 @@ class SDXLDiffDiffControlNetDenoiseStep(StableDiffusionXLDenoiseLoopWrapper): controlnet_denoise_block = SDXLDiffDiffControlNetDenoiseStep() ``` -Insert the `controlnet_input` block and replace the `denoise` block with the new `controlnet_denoise_block`. Initialize a [`ModularPipeline`] and [`~ModularPipeline.load_default_components`] into it. +Insert the `controlnet_input` block and replace the `denoise` block with the new `controlnet_denoise_block`. Initialize a [`ModularPipeline`] and [`~ModularPipeline.load_components`] into it. 
```py dd_blocks.sub_blocks.insert("controlnet_input", control_input_block, 7) dd_blocks.sub_blocks["denoise"] = controlnet_denoise_block dd_pipeline = dd_blocks.init_pipeline("YiYiXu/modular-demo-auto", collection="diffdiff") -dd_pipeline.load_default_components(torch_dtype=torch.float16) +dd_pipeline.load_components(torch_dtype=torch.float16) dd_pipeline = dd_pipeline.to(device) control_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/diffdiff_tomato_canny.jpeg") @@ -320,7 +320,7 @@ Call [`SequentialPipelineBlocks.from_blocks_dict`] to create a [`SequentialPipel ```py dd_auto_blocks = SequentialPipelineBlocks.from_blocks_dict(DIFFDIFF_AUTO_BLOCKS) dd_pipeline = dd_auto_blocks.init_pipeline("YiYiXu/modular-demo-auto", collection="diffdiff") -dd_pipeline.load_default_components(torch_dtype=torch.float16) +dd_pipeline.load_components(torch_dtype=torch.float16) ``` ## Share @@ -340,5 +340,5 @@ from diffusers.modular_pipelines import ModularPipeline, ComponentsManager components = ComponentsManager() diffdiff_pipeline = ModularPipeline.from_pretrained("YiYiXu/modular-diffdiff-0704", trust_remote_code=True, components_manager=components, collection="diffdiff") -diffdiff_pipeline.load_default_components(torch_dtype=torch.float16) -``` \ No newline at end of file +diffdiff_pipeline.load_components(torch_dtype=torch.float16) +``` diff --git a/docs/source/zh/modular_diffusers/components_manager.md b/docs/source/zh/modular_diffusers/components_manager.md index 8b4425027f..39fef0651d 100644 --- a/docs/source/zh/modular_diffusers/components_manager.md +++ b/docs/source/zh/modular_diffusers/components_manager.md @@ -48,10 +48,10 @@ t2i_pipeline = t2i_blocks.init_pipeline(modular_repo_id, components_manager=comp -组件仅在调用 [`~ModularPipeline.load_components`] 或 [`~ModularPipeline.load_default_components`] 时加载和注册。以下示例使用 [`~ModularPipeline.load_default_components`] 创建第二个管道,重用第一个管道的所有组件,并将其分配到不同的集合。 +组件仅在调用 [`~ModularPipeline.load_components`] 或 [`~ModularPipeline.load_components`] 时加载和注册。以下示例使用 [`~ModularPipeline.load_components`] 创建第二个管道,重用第一个管道的所有组件,并将其分配到不同的集合。 ```py -pipe.load_default_components() +pipe.load_components() pipe2 = ModularPipeline.from_pretrained("YiYiXu/modular-demo-auto", components_manager=comp, collection="test2") ``` @@ -185,4 +185,4 @@ comp.enable_auto_cpu_offload(device="cuda") 所有模型开始时都在 CPU 上,[`ComponentsManager`] 在需要它们之前将它们移动到适当的设备,并在 GPU 内存不足时将其他模型移回 CPU。 -您可以设置自己的规则来决定哪些模型要卸载。 \ No newline at end of file +您可以设置自己的规则来决定哪些模型要卸载。 diff --git a/docs/source/zh/modular_diffusers/guiders.md b/docs/source/zh/modular_diffusers/guiders.md index d0b5fb4312..1006460a2b 100644 --- a/docs/source/zh/modular_diffusers/guiders.md +++ b/docs/source/zh/modular_diffusers/guiders.md @@ -73,13 +73,13 @@ ComponentSpec(name='guider', type_hint= - \ No newline at end of file + diff --git a/docs/source/zh/modular_diffusers/modular_pipeline.md b/docs/source/zh/modular_diffusers/modular_pipeline.md index 47cecea764..daf61ecf40 100644 --- a/docs/source/zh/modular_diffusers/modular_pipeline.md +++ b/docs/source/zh/modular_diffusers/modular_pipeline.md @@ -28,7 +28,7 @@ blocks = SequentialPipelineBlocks.from_blocks_dict(TEXT2IMAGE_BLOCKS) modular_repo_id = "YiYiXu/modular-loader-t2i-0704" pipeline = blocks.init_pipeline(modular_repo_id) -pipeline.load_default_components(torch_dtype=torch.float16) +pipeline.load_components(torch_dtype=torch.float16) pipeline.to("cuda") image = pipeline(prompt="Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", 
output="images")[0] @@ -48,7 +48,7 @@ blocks = SequentialPipelineBlocks.from_blocks_dict(IMAGE2IMAGE_BLOCKS) modular_repo_id = "YiYiXu/modular-loader-t2i-0704" pipeline = blocks.init_pipeline(modular_repo_id) -pipeline.load_default_components(torch_dtype=torch.float16) +pipeline.load_components(torch_dtype=torch.float16) pipeline.to("cuda") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png" @@ -72,7 +72,7 @@ blocks = SequentialPipelineBlocks.from_blocks_dict(INPAINT_BLOCKS) modular_repo_id = "YiYiXu/modular-loader-t2i-0704" pipeline = blocks.init_pipeline(modular_repo_id) -pipeline.load_default_components(torch_dtype=torch.float16) +pipeline.load_components(torch_dtype=torch.float16) pipeline.to("cuda") img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png" @@ -176,15 +176,15 @@ diffdiff_pipeline = ModularPipeline.from_pretrained(modular_repo_id, trust_remot ## 加载组件 -一个[`ModularPipeline`]不会自动实例化组件。它只加载配置和组件规范。您可以使用[`~ModularPipeline.load_default_components`]加载所有组件,或仅使用[`~ModularPipeline.load_components`]加载特定组件。 +一个[`ModularPipeline`]不会自动实例化组件。它只加载配置和组件规范。您可以使用[`~ModularPipeline.load_components`]加载所有组件,或仅使用[`~ModularPipeline.load_components`]加载特定组件。 - + ```py import torch -t2i_pipeline.load_default_components(torch_dtype=torch.float16) +t2i_pipeline.load_components(torch_dtype=torch.float16) t2i_pipeline.to("cuda") ``` diff --git a/docs/source/zh/modular_diffusers/quickstart.md b/docs/source/zh/modular_diffusers/quickstart.md index 3322aba12c..2c4a6a51af 100644 --- a/docs/source/zh/modular_diffusers/quickstart.md +++ b/docs/source/zh/modular_diffusers/quickstart.md @@ -175,7 +175,7 @@ print(dd_blocks) 将 [`SequentialPipelineBlocks`] 转换为 [`ModularPipeline`],使用 [`ModularPipeline.init_pipeline`] 方法。这会初始化从 `modular_model_index.json` 文件加载的预期组件。通过调用 [`ModularPipeline.load_defau lt_components`]。 -初始化[`ComponentManager`]时传入pipeline是一个好主意,以帮助管理不同的组件。一旦调用[`~ModularPipeline.load_default_components`],组件就会被注册到[`ComponentManager`]中,并且可以在工作流之间共享。下面的例子使用`collection`参数为组件分配了一个`"diffdiff"`标签,以便更好地组织。 +初始化[`ComponentManager`]时传入pipeline是一个好主意,以帮助管理不同的组件。一旦调用[`~ModularPipeline.load_components`],组件就会被注册到[`ComponentManager`]中,并且可以在工作流之间共享。下面的例子使用`collection`参数为组件分配了一个`"diffdiff"`标签,以便更好地组织。 ```py from diffusers.modular_pipelines import ComponentsManager @@ -209,11 +209,11 @@ ip_adapter_block = StableDiffusionXLAutoIPAdapterStep() dd_blocks.sub_blocks.insert("ip_adapter", ip_adapter_block, 0) ``` -调用[`~ModularPipeline.init_pipeline`]来初始化一个[`ModularPipeline`],并使用[`~ModularPipeline.load_default_components`]加载模型组件。加载并设置IP-Adapter以运行pipeline。 +调用[`~ModularPipeline.init_pipeline`]来初始化一个[`ModularPipeline`],并使用[`~ModularPipeline.load_components`]加载模型组件。加载并设置IP-Adapter以运行pipeline。 ```py dd_pipeline = dd_blocks.init_pipeline("YiYiXu/modular-demo-auto", collection="diffdiff") -dd_pipeline.load_default_components(torch_dtype=torch.float16) +dd_pipeline.load_components(torch_dtype=torch.float16) dd_pipeline.loader.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") dd_pipeline.loader.set_ip_adapter_scale(0.6) dd_pipeline = dd_pipeline.to(device) @@ -261,14 +261,14 @@ class SDXLDiffDiffControlNetDenoiseStep(StableDiffusionXLDenoiseLoopWrapper): controlnet_denoise_block = SDXLDiffDiffControlNetDenoiseStep() ``` -插入 `controlnet_input` 块并用新的 `controlnet_denoise_block` 替换 `denoise` 块。初始化一个 [`ModularPipeline`] 并将 [`~ModularPipeline.load_default_components`] 加载到其中。 
+插入 `controlnet_input` 块并用新的 `controlnet_denoise_block` 替换 `denoise` 块。初始化一个 [`ModularPipeline`] 并将 [`~ModularPipeline.load_components`] 加载到其中。 ```py dd_blocks.sub_blocks.insert("controlnet_input", control_input_block, 7) dd_blocks.sub_blocks["denoise"] = controlnet_denoise_block dd_pipeline = dd_blocks.init_pipeline("YiYiXu/modular-demo-auto", collection="diffdiff") -dd_pipeline.load_default_components(torch_dtype=torch.float16) +dd_pipeline.load_components(torch_dtype=torch.float16) dd_pipeline = dd_pipeline.to(device) control_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/diffdiff_tomato_canny.jpeg") @@ -322,7 +322,7 @@ DIFFDIFF_AUTO_BLOCKS.insert("controlnet_input",StableDiffusionXLControlNetAutoIn ```py dd_auto_blocks = SequentialPipelineBlocks.from_blocks_dict(DIFFDIFF_AUTO_BLOCKS) dd_pipeline = dd_auto_blocks.init_pipeline("YiYiXu/modular-demo-auto", collection="diffdiff") -dd_pipeline.load_default_components(torch_dtype=torch.float16) +dd_pipeline.load_components(torch_dtype=torch.float16) ``` ## 分享 @@ -342,5 +342,5 @@ from diffusers.modular_pipelines import ModularPipeline, ComponentsManager components = ComponentsManager() diffdiff_pipeline = ModularPipeline.from_pretrained("YiYiXu/modular-diffdiff-0704", trust_remote_code=True, components_manager=components, collection="diffdiff") -diffdiff_pipeline.load_default_components(torch_dtype=torch.float16) +diffdiff_pipeline.load_components(torch_dtype=torch.float16) ``` diff --git a/src/diffusers/modular_pipelines/modular_pipeline.py b/src/diffusers/modular_pipelines/modular_pipeline.py index 35b9ac24c9..3918679c16 100644 --- a/src/diffusers/modular_pipelines/modular_pipeline.py +++ b/src/diffusers/modular_pipelines/modular_pipeline.py @@ -1418,7 +1418,7 @@ class LoopSequentialPipelineBlocks(ModularPipelineBlocks): # YiYi TODO: # 1. look into the serialization of modular_model_index.json, make sure the items are properly ordered like model_index.json (currently a mess) # 2. do we need ConfigSpec? the are basically just key/val kwargs -# 3. imnprove docstring and potentially add validator for methods where we accpet kwargs to be passed to from_pretrained/save_pretrained/load_default_components(), load_components() +# 3. imnprove docstring and potentially add validator for methods where we accpet kwargs to be passed to from_pretrained/save_pretrained/load_components() class ModularPipeline(ConfigMixin, PushToHubMixin): """ Base class for all Modular pipelines. @@ -1488,7 +1488,7 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): - Components with default_creation_method="from_config" are created immediately, its specs are not included in config dict and will not be saved in `modular_model_index.json` - Components with default_creation_method="from_pretrained" are set to None and can be loaded later with - `load_default_components()`/`load_components()` + `load_components()` (with or without specific component names) - The pipeline's config dict is populated with component specs (only for from_pretrained components) and config values, which will be saved as `modular_model_index.json` during `save_pretrained` - The pipeline's config dict is also used to store the pipeline blocks's class name, which will be saved as @@ -1603,20 +1603,6 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): params[input_param.name] = input_param.default return params - def load_default_components(self, **kwargs): - """ - Load from_pretrained components using the loading specs in the config dict. 
- - Args: - **kwargs: Additional arguments passed to `from_pretrained` method, e.g. torch_dtype, cache_dir, etc. - """ - names = [ - name - for name in self._component_specs.keys() - if self._component_specs[name].default_creation_method == "from_pretrained" - ] - self.load_components(names=names, **kwargs) - @classmethod @validate_hf_hub_args def from_pretrained( @@ -1770,8 +1756,8 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): - non from_pretrained components are created during __init__ and registered as the object itself - Components are updated with the `update_components()` method: e.g. loader.update_components(unet=unet) or loader.update_components(guider=guider_spec) - - (from_pretrained) Components are loaded with the `load_default_components()` method: e.g. - loader.load_default_components(names=["unet"]) + - (from_pretrained) Components are loaded with the `load_components()` method: e.g. + loader.load_components(names=["unet"]) or loader.load_components() to load all default components Args: **kwargs: Keyword arguments where keys are component names and values are component objects. @@ -2097,13 +2083,14 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): self.register_to_config(**config_to_register) # YiYi TODO: support map for additional from_pretrained kwargs - # YiYi/Dhruv TODO: consolidate load_components and load_default_components? - def load_components(self, names: Union[List[str], str], **kwargs): + def load_components(self, names: Optional[Union[List[str], str]] = None, **kwargs): """ Load selected components from specs. Args: - names: List of component names to load; by default will not load any components + names: List of component names to load. If None, will load all components with + default_creation_method == "from_pretrained". If provided as a list or string, will load only the + specified components. **kwargs: additional kwargs to be passed to `from_pretrained()`.Can be: - a single value to be applied to all components to be loaded, e.g. torch_dtype=torch.bfloat16 - a dict, e.g. torch_dtype={"unet": torch.bfloat16, "default": torch.float32} @@ -2111,7 +2098,13 @@ class ModularPipeline(ConfigMixin, PushToHubMixin): `variant`, `revision`, etc. 
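            Example (a sketch; the component names are placeholders that depend on the pipeline's component specs):

                pipeline.load_components(torch_dtype=torch.bfloat16)               # every from_pretrained component
                pipeline.load_components(names="unet", torch_dtype=torch.float16)  # a single component
                pipeline.load_components(
                    names=["unet", "vae"],
                    torch_dtype={"unet": torch.bfloat16, "default": torch.float32},
                )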
""" - if isinstance(names, str): + if names is None: + names = [ + name + for name in self._component_specs.keys() + if self._component_specs[name].default_creation_method == "from_pretrained" + ] + elif isinstance(names, str): names = [names] elif not isinstance(names, list): raise ValueError(f"Invalid type for names: {type(names)}") diff --git a/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py b/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py index efc91416d0..d05f818135 100644 --- a/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py +++ b/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py @@ -67,7 +67,7 @@ class SDXLModularTests: def get_pipeline(self, components_manager=None, torch_dtype=torch.float32): pipeline = self.pipeline_blocks_class().init_pipeline(self.repo, components_manager=components_manager) - pipeline.load_default_components(torch_dtype=torch_dtype) + pipeline.load_components(torch_dtype=torch_dtype) return pipeline def get_dummy_inputs(self, device, seed=0): @@ -158,7 +158,7 @@ class SDXLModularIPAdapterTests: blocks = self.pipeline_blocks_class() _ = blocks.sub_blocks.pop("ip_adapter") pipe = blocks.init_pipeline(self.repo) - pipe.load_default_components(torch_dtype=torch.float32) + pipe.load_components(torch_dtype=torch.float32) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) cross_attention_dim = pipe.unet.config.get("cross_attention_dim") diff --git a/tests/modular_pipelines/test_modular_pipelines_common.py b/tests/modular_pipelines/test_modular_pipelines_common.py index 6e61272693..d309fcf353 100644 --- a/tests/modular_pipelines/test_modular_pipelines_common.py +++ b/tests/modular_pipelines/test_modular_pipelines_common.py @@ -344,7 +344,7 @@ class ModularPipelineTesterMixin: with tempfile.TemporaryDirectory() as tmpdirname: base_pipe.save_pretrained(tmpdirname) pipe = ModularPipeline.from_pretrained(tmpdirname).to(torch_device) - pipe.load_default_components(torch_dtype=torch.float32) + pipe.load_components(torch_dtype=torch.float32) pipe.to(torch_device) pipes.append(pipe) From 9b721db205729d5a6e97a72312c3a0f4534064f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nguy=E1=BB=85n=20Tr=E1=BB=8Dng=20Tu=E1=BA=A5n?= <119487916+Trgtuan10@users.noreply.github.com> Date: Sat, 30 Aug 2025 13:16:43 +0700 Subject: [PATCH 35/74] [QwenImageEditPipeline] Add image entry in __call__ function (#12254) add entry Co-authored-by: TuanNT-ZenAI --- .../pipelines/qwenimage/pipeline_qwenimage_edit.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py index ceb5492fab..977f2790a3 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py @@ -551,6 +551,12 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Function invoked when calling the pipeline for generation. Args: + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. 
If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. From 827fad66a02745093de94e8a926f74e896833b2a Mon Sep 17 00:00:00 2001 From: Leo Jiang Date: Sun, 31 Aug 2025 04:18:51 +0800 Subject: [PATCH 36/74] Improve performance of NPU FA (#12260) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: J石页 Co-authored-by: Aryan --- src/diffusers/models/attention_dispatch.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/diffusers/models/attention_dispatch.py b/src/diffusers/models/attention_dispatch.py index 6a05aac215..e123f4c193 100644 --- a/src/diffusers/models/attention_dispatch.py +++ b/src/diffusers/models/attention_dispatch.py @@ -955,12 +955,13 @@ def _native_npu_attention( dropout_p: float = 0.0, scale: Optional[float] = None, ) -> torch.Tensor: - return npu_fusion_attention( + query, key, value = (x.transpose(1, 2).contiguous() for x in (query, key, value)) + out = npu_fusion_attention( query, key, value, - query.size(2), # num_heads - input_layout="BSND", + query.size(1), # num_heads + input_layout="BNSD", pse=None, scale=1.0 / math.sqrt(query.shape[-1]) if scale is None else scale, pre_tockens=65536, @@ -969,6 +970,8 @@ def _native_npu_attention( sync=False, inner_precise=0, )[0] + out = out.transpose(1, 2).contiguous() + return out # Reference: https://github.com/pytorch/xla/blob/06c5533de6588f6b90aa1655d9850bcf733b90b4/torch_xla/experimental/custom_kernel.py#L853 From 67ffa7031e5a4bf0991b692a424e36ca59e64ec9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nguy=E1=BB=85n=20Tr=E1=BB=8Dng=20Tu=E1=BA=A5n?= <119487916+Trgtuan10@users.noreply.github.com> Date: Sun, 31 Aug 2025 12:49:15 +0700 Subject: [PATCH 37/74] Add Qwen-Image-Edit Inpainting pipeline (#12225) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add qwenimage-edit inpaint feature * stay up to date with main branch * fix style * fix docs * copies * fix * again * copies --------- Co-authored-by: “Trgtuan10” <“tuannguyentrong.402@gmail.com”> Co-authored-by: TuanNT-ZenAI Co-authored-by: yiyixuxu --- docs/source/en/api/pipelines/qwenimage.md | 6 + src/diffusers/__init__.py | 2 + src/diffusers/pipelines/__init__.py | 2 + src/diffusers/pipelines/qwenimage/__init__.py | 2 + .../pipeline_qwenimage_edit_inpaint.py | 1106 +++++++++++++++++ .../dummy_torch_and_transformers_objects.py | 15 + 6 files changed, 1133 insertions(+) create mode 100644 src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py diff --git a/docs/source/en/api/pipelines/qwenimage.md b/docs/source/en/api/pipelines/qwenimage.md index 518938131b..2dec47309c 100644 --- a/docs/source/en/api/pipelines/qwenimage.md +++ b/docs/source/en/api/pipelines/qwenimage.md @@ -120,6 +120,12 @@ The `guidance_scale` parameter in the pipeline is there to support future guidan - all - __call__ +## QwenImageEditInpaintPipeline + +[[autodoc]] QwenImageEditInpaintPipeline + - all + - __call__ + ## QwenImaggeControlNetPipeline - all - __call__ diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index a606941f1d..762ae3846a 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -494,6 +494,7 @@ else: "PixArtSigmaPAGPipeline", 
"PixArtSigmaPipeline", "QwenImageControlNetPipeline", + "QwenImageEditInpaintPipeline", "QwenImageEditPipeline", "QwenImageImg2ImgPipeline", "QwenImageInpaintPipeline", @@ -1134,6 +1135,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: PixArtSigmaPAGPipeline, PixArtSigmaPipeline, QwenImageControlNetPipeline, + QwenImageEditInpaintPipeline, QwenImageEditPipeline, QwenImageImg2ImgPipeline, QwenImageInpaintPipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index b3cfc62287..25d5d213cf 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -393,6 +393,7 @@ else: "QwenImageImg2ImgPipeline", "QwenImageInpaintPipeline", "QwenImageEditPipeline", + "QwenImageEditInpaintPipeline", "QwenImageControlNetPipeline", ] try: @@ -714,6 +715,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline from .qwenimage import ( QwenImageControlNetPipeline, + QwenImageEditInpaintPipeline, QwenImageEditPipeline, QwenImageImg2ImgPipeline, QwenImageInpaintPipeline, diff --git a/src/diffusers/pipelines/qwenimage/__init__.py b/src/diffusers/pipelines/qwenimage/__init__.py index bcf0911e0f..ae5cf04dc5 100644 --- a/src/diffusers/pipelines/qwenimage/__init__.py +++ b/src/diffusers/pipelines/qwenimage/__init__.py @@ -26,6 +26,7 @@ else: _import_structure["pipeline_qwenimage"] = ["QwenImagePipeline"] _import_structure["pipeline_qwenimage_controlnet"] = ["QwenImageControlNetPipeline"] _import_structure["pipeline_qwenimage_edit"] = ["QwenImageEditPipeline"] + _import_structure["pipeline_qwenimage_edit_inpaint"] = ["QwenImageEditInpaintPipeline"] _import_structure["pipeline_qwenimage_img2img"] = ["QwenImageImg2ImgPipeline"] _import_structure["pipeline_qwenimage_inpaint"] = ["QwenImageInpaintPipeline"] @@ -39,6 +40,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_qwenimage import QwenImagePipeline from .pipeline_qwenimage_controlnet import QwenImageControlNetPipeline from .pipeline_qwenimage_edit import QwenImageEditPipeline + from .pipeline_qwenimage_edit_inpaint import QwenImageEditInpaintPipeline from .pipeline_qwenimage_img2img import QwenImageImg2ImgPipeline from .pipeline_qwenimage_inpaint import QwenImageInpaintPipeline else: diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py new file mode 100644 index 0000000000..b064c40bca --- /dev/null +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py @@ -0,0 +1,1106 @@ +# Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import math +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import QwenImageLoraLoaderMixin +from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import QwenImagePipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from PIL import Image + >>> from diffusers import QwenImageEditInpaintPipeline + >>> from diffusers.utils import load_image + + >>> pipe = QwenImageEditInpaintPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16) + >>> pipe.to("cuda") + >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + >>> source = load_image(img_url) + >>> mask = load_image(mask_url) + >>> image = pipe( + ... prompt=prompt, negative_prompt=" ", image=source, mask_image=mask, strength=1.0, num_inference_steps=50 + ... ).images[0] + >>> image.save("qwenimage_inpainting.png") + ``` +""" + + +# Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.15, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + r""" + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. 
If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_edit.calculate_dimensions +def calculate_dimensions(target_area, ratio): + width = math.sqrt(target_area * ratio) + height = width / ratio + + width = round(width / 32) * 32 + height = round(height / 32) * 32 + + return width, height, None + + +class QwenImageEditInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): + r""" + The Qwen-Image-Edit pipeline for image editing. + + Args: + transformer ([`QwenImageTransformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`Qwen2.5-VL-7B-Instruct`]): + [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct), specifically the + [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) variant. + tokenizer (`QwenTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). 
+ """ + + model_cpu_offload_seq = "text_encoder->transformer->vae" + _callback_tensor_inputs = ["latents", "prompt_embeds"] + + def __init__( + self, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKLQwenImage, + text_encoder: Qwen2_5_VLForConditionalGeneration, + tokenizer: Qwen2Tokenizer, + processor: Qwen2VLProcessor, + transformer: QwenImageTransformer2DModel, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + processor=processor, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 + self.latent_channels = self.vae.config.z_dim if getattr(self, "vae", None) else 16 + # QwenImage latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible + # by the patch size. So the vae scale factor is multiplied by the patch size to account for this + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor * 2, + vae_latent_channels=self.latent_channels, + do_normalize=False, + do_binarize=True, + do_convert_grayscale=True, + ) + self.vl_processor = processor + self.tokenizer_max_length = 1024 + + self.prompt_template_encode = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n" + self.prompt_template_encode_start_idx = 64 + self.default_sample_size = 128 + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._extract_masked_hidden + def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor): + bool_mask = mask.bool() + valid_lengths = bool_mask.sum(dim=1) + selected = hidden_states[bool_mask] + split_result = torch.split(selected, valid_lengths.tolist(), dim=0) + + return split_result + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_edit.QwenImageEditPipeline._get_qwen_prompt_embeds + def _get_qwen_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + image: Optional[torch.Tensor] = None, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + + template = self.prompt_template_encode + drop_idx = self.prompt_template_encode_start_idx + txt = [template.format(e) for e in prompt] + + model_inputs = self.processor( + text=txt, + images=image, + padding=True, + return_tensors="pt", + ).to(device) + + outputs = self.text_encoder( + input_ids=model_inputs.input_ids, + attention_mask=model_inputs.attention_mask, + pixel_values=model_inputs.pixel_values, + image_grid_thw=model_inputs.image_grid_thw, + output_hidden_states=True, + ) + + hidden_states = outputs.hidden_states[-1] + split_hidden_states = self._extract_masked_hidden(hidden_states, model_inputs.attention_mask) + split_hidden_states = [e[drop_idx:] for e in split_hidden_states] + attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in 
split_hidden_states] + max_seq_len = max([e.size(0) for e in split_hidden_states]) + prompt_embeds = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states] + ) + encoder_attention_mask = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list] + ) + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + return prompt_embeds, encoder_attention_mask + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_edit.QwenImageEditPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + image: Optional[torch.Tensor] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_mask: Optional[torch.Tensor] = None, + max_sequence_length: int = 1024, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + image (`torch.Tensor`, *optional*): + image to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + """ + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) if prompt_embeds is None else prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_embeds, prompt_embeds_mask = self._get_qwen_prompt_embeds(prompt, image, device) + + _, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_images_per_prompt, 1) + prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_images_per_prompt, seq_len) + + return prompt_embeds, prompt_embeds_mask + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_inpaint.QwenImageInpaintPipeline.check_inputs + def check_inputs( + self, + prompt, + image, + mask_image, + strength, + height, + width, + output_type, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_embeds_mask=None, + negative_prompt_embeds_mask=None, + callback_on_step_end_tensor_inputs=None, + padding_mask_crop=None, + max_sequence_length=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: + logger.warning( + f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and prompt_embeds_mask is None: + raise ValueError( + "If `prompt_embeds` are provided, `prompt_embeds_mask` also have to be passed. Make sure to generate `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`." + ) + if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also have to be passed. Make sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + if padding_mask_crop is not None: + if not isinstance(image, PIL.Image.Image): + raise ValueError( + f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}." + ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." + ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.") + + if max_sequence_length is not None and max_sequence_length > 1024: + raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}") + + @staticmethod + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._pack_latents + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + + return latents + + @staticmethod + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._unpack_latents + def _unpack_latents(latents, height, width, vae_scale_factor): + batch_size, num_patches, channels = latents.shape + + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. 
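+ # e.g. for a 1024x1024 image with vae_scale_factor=8, height = width = 2 * (1024 // 16) = 128,
+ # i.e. a 64x64 grid of 2x2 patches whose entries were folded into the channel dimension by _pack_latents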
+ height = 2 * (int(height) // (vae_scale_factor * 2)) + width = 2 * (int(width) // (vae_scale_factor * 2)) + + latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (2 * 2), 1, height, width) + + return latents + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_img2img.QwenImageImg2ImgPipeline._encode_vae_image + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, self.vae.config.z_dim, 1, 1, 1) + .to(image_latents.device, image_latents.dtype) + ) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( + image_latents.device, image_latents.dtype + ) + + image_latents = (image_latents - latents_mean) * latents_std + + return image_latents + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(num_inference_steps * strength, num_inference_steps) + + t_start = int(max(num_inference_steps - init_timestep, 0)) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_tiling() + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_inpaint.QwenImageInpaintPipeline.prepare_latents + def prepare_latents( + self, + image, + timestep, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. + height = 2 * (int(height) // (self.vae_scale_factor * 2)) + width = 2 * (int(width) // (self.vae_scale_factor * 2)) + + shape = (batch_size, 1, num_channels_latents, height, width) + + # If image is [B,C,H,W] -> add T=1. If it's already [B,C,T,H,W], leave it. + if image.dim() == 4: + image = image.unsqueeze(2) + elif image.dim() != 5: + raise ValueError(f"Expected image dims 4 or 5, got {image.dim()}.") + + if latents is not None: + return latents.to(device=device, dtype=dtype) + + image = image.to(device=device, dtype=dtype) + if image.shape[1] != self.latent_channels: + image_latents = self._encode_vae_image(image=image, generator=generator) # [B,z,1,H',W'] + else: + image_latents = image + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." + ) + else: + image_latents = torch.cat([image_latents], dim=0) + + image_latents = image_latents.transpose(1, 2) # [B,1,z,H',W'] + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self.scheduler.scale_noise(image_latents, timestep, noise) + else: + noise = latents.to(device) + latents = noise + + noise = self._pack_latents(noise, batch_size, num_channels_latents, height, width) + image_latents = self._pack_latents(image_latents, batch_size, num_channels_latents, height, width) + latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) + + return latents, noise, image_latents + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_inpaint.QwenImageInpaintPipeline.prepare_mask_latents + def prepare_mask_latents( + self, + mask, + masked_image, + batch_size, + num_channels_latents, + num_images_per_prompt, + height, + width, + dtype, + device, + generator, + ): + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. 
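+ # height/width below are latent-space dimensions; the mask is interpolated to this resolution and
+ # packed with _pack_latents so its tokens line up one-to-one with the packed image latents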
+ height = 2 * (int(height) // (self.vae_scale_factor * 2)) + width = 2 * (int(width) // (self.vae_scale_factor * 2)) + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate(mask, size=(height, width)) + mask = mask.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if masked_image.dim() == 4: + masked_image = masked_image.unsqueeze(2) + elif masked_image.dim() != 5: + raise ValueError(f"Expected image dims 4 or 5, got {masked_image.dim()}.") + + masked_image = masked_image.to(device=device, dtype=dtype) + + if masked_image.shape[1] == self.latent_channels: + masked_image_latents = masked_image + else: + masked_image_latents = self._encode_vae_image(image=masked_image, generator=generator) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1, 1) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + + masked_image_latents = self._pack_latents( + masked_image_latents, + batch_size, + num_channels_latents, + height, + width, + ) + mask = self._pack_latents( + mask.repeat(1, num_channels_latents, 1, 1), + batch_size, + num_channels_latents, + height, + width, + ) + + return mask, masked_image_latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def attention_kwargs(self): + return self._attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def current_timestep(self): + return self._current_timestep + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Optional[PipelineImageInput] = None, + prompt: Union[str, List[str]] = None, + negative_prompt: Union[str, List[str]] = None, + mask_image: PipelineImageInput = None, + masked_image_latents: PipelineImageInput = None, + true_cfg_scale: float = 4.0, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 0.6, + num_inference_steps: int = 50, + sigmas: Optional[List[float]] = None, + guidance_scale: Optional[float] = None, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_mask: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is + not greater than `1`). 
+ true_cfg_scale (`float`, *optional*, defaults to 4.0):
+ Guidance scale as defined in [Classifier-Free
+ Diffusion Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of
+ equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is
+ enabled by setting `true_cfg_scale > 1` and providing a `negative_prompt`. A higher guidance scale
+ encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of
+ lower image quality.
+ mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+ `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask
+ are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
+ single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one
+ color channel (L) instead of 3, so the expected shape for a pytorch tensor would be `(B, 1, H, W)`, `(B,
+ H, W)`, `(1, H, W)`, or `(H, W)`, and for a numpy array `(B, H, W, 1)`, `(B, H, W)`, `(H, W,
+ 1)`, or `(H, W)`.
+ masked_image_latents (`torch.Tensor`, `List[torch.Tensor]`):
+ `Tensor` representing an image batch to mask `image`, generated by the VAE. If not provided, the mask
+ latents tensor will be generated from `mask_image`.
+ height (`int`, *optional*):
+ The height in pixels of the generated image. In this pipeline it is derived from the input `image`
+ (targeting a total area of 1024x1024) and rounded down to a multiple of `vae_scale_factor * 2`.
+ width (`int`, *optional*):
+ The width in pixels of the generated image. In this pipeline it is derived from the input `image`
+ (targeting a total area of 1024x1024) and rounded down to a multiple of `vae_scale_factor * 2`.
+ padding_mask_crop (`int`, *optional*, defaults to `None`):
+ The size of the margin in the crop to be applied to the image and mask. If `None`, no crop is applied to
+ the image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region
+ with the same aspect ratio as the image that contains all of the masked area, and then expand that area based
+ on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before
+ resizing to the original image size for inpainting. This is useful when the masked area is small while
+ the image is large and contains information irrelevant to inpainting, such as background.
+ strength (`float`, *optional*, defaults to 0.6):
+ Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
+ essentially ignores `image`.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ sigmas (`List[float]`, *optional*):
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+ will be used. 
+ guidance_scale (`float`, *optional*, defaults to None):
+ A guidance scale value for guidance-distilled models. Unlike traditional classifier-free guidance,
+ where the guidance scale is applied during inference through noise prediction rescaling, guidance-
+ distilled models take the guidance scale directly as an input parameter during the forward pass. Guidance
+ scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the model to generate images
+ that are closely linked to the text `prompt`, usually at the expense of lower image quality. This
+ parameter exists to support future guidance-distilled models; it is
+ ignored when the model is not guidance-distilled. To enable traditional classifier-free guidance,
+ please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like " " should
+ enable classifier-free guidance computations).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.Tensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.Tensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from the `prompt` input argument.
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from the `negative_prompt` input
+ argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.qwenimage.QwenImagePipelineOutput`] instead of a plain tuple.
+ attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ callback_on_step_end (`Callable`, *optional*):
+ A function that is called at the end of each denoising step during inference. The function is called
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+ `callback_on_step_end_tensor_inputs`.
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+ will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the
+ `._callback_tensor_inputs` attribute of your pipeline class.
+ max_sequence_length (`int`, *optional*, defaults to 512): Maximum sequence length to use with the `prompt`. 
+ + Examples: + + Returns: + [`~pipelines.qwenimage.QwenImagePipelineOutput`] or `tuple`: + [`~pipelines.qwenimage.QwenImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is a list with the generated images. + """ + image_size = image[0].size if isinstance(image, list) else image.size + calculated_width, calculated_height, _ = calculate_dimensions(1024 * 1024, image_size[0] / image_size[1]) + + # height and width are the same as the calculated height and width + height = calculated_height + width = calculated_width + + multiple_of = self.vae_scale_factor * 2 + width = width // multiple_of * multiple_of + height = height // multiple_of * multiple_of + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + image, + mask_image, + strength, + height, + width, + output_type=output_type, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_embeds_mask=prompt_embeds_mask, + negative_prompt_embeds_mask=negative_prompt_embeds_mask, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + padding_mask_crop=padding_mask_crop, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._attention_kwargs = attention_kwargs + self._current_timestep = None + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # 3. Preprocess image + if padding_mask_crop is not None: + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + if image is not None and not (isinstance(image, torch.Tensor) and image.size(1) == self.latent_channels): + image = self.image_processor.resize(image, calculated_height, calculated_width) + original_image = image + prompt_image = image + image = self.image_processor.preprocess( + image, + height=calculated_height, + width=calculated_width, + crops_coords=crops_coords, + resize_mode=resize_mode, + ) + image = image.to(dtype=torch.float32) + + has_neg_prompt = negative_prompt is not None or ( + negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None + ) + + if true_cfg_scale > 1 and not has_neg_prompt: + logger.warning( + f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." + ) + elif true_cfg_scale <= 1 and has_neg_prompt: + logger.warning( + " negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1" + ) + + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt + prompt_embeds, prompt_embeds_mask = self.encode_prompt( + image=prompt_image, + prompt=prompt, + prompt_embeds=prompt_embeds, + prompt_embeds_mask=prompt_embeds_mask, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + if do_true_cfg: + negative_prompt_embeds, negative_prompt_embeds_mask = self.encode_prompt( + image=prompt_image, + prompt=negative_prompt, + prompt_embeds=negative_prompt_embeds, + prompt_embeds_mask=negative_prompt_embeds_mask, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + # 4. 
Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas + image_seq_len = (int(height) // self.vae_scale_factor // 2) * (int(width) // self.vae_scale_factor // 2) + mu = calculate_shift( + image_seq_len, + self.scheduler.config.get("base_image_seq_len", 256), + self.scheduler.config.get("max_image_seq_len", 4096), + self.scheduler.config.get("base_shift", 0.5), + self.scheduler.config.get("max_shift", 1.15), + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + sigmas=sigmas, + mu=mu, + ) + + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 5. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents, noise, image_latents = self.prepare_latents( + image, + latent_timestep, + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + mask_condition = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + if masked_image_latents is None: + masked_image = image * (mask_condition < 0.5) + else: + masked_image = masked_image_latents + + mask, masked_image_latents = self.prepare_mask_latents( + mask_condition, + masked_image, + batch_size, + num_channels_latents, + num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + ) + + img_shapes = [ + [ + (1, height // self.vae_scale_factor // 2, width // self.vae_scale_factor // 2), + (1, calculated_height // self.vae_scale_factor // 2, calculated_width // self.vae_scale_factor // 2), + ] + ] * batch_size + + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # handle guidance + if self.transformer.config.guidance_embeds and guidance_scale is None: + raise ValueError("guidance_scale is required for guidance-distilled model.") + elif self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) + elif not self.transformer.config.guidance_embeds and guidance_scale is not None: + logger.warning( + f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." + ) + guidance = None + elif not self.transformer.config.guidance_embeds and guidance_scale is None: + guidance = None + + if self.attention_kwargs is None: + self._attention_kwargs = {} + + txt_seq_lens = prompt_embeds_mask.sum(dim=1).tolist() if prompt_embeds_mask is not None else None + negative_txt_seq_lens = ( + negative_prompt_embeds_mask.sum(dim=1).tolist() if negative_prompt_embeds_mask is not None else None + ) + + # 6. 
Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + self._current_timestep = t + + latent_model_input = latents + if image_latents is not None: + latent_model_input = torch.cat([latents, image_latents], dim=1) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latents.shape[0]).to(latents.dtype) + with self.transformer.cache_context("cond"): + noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep / 1000, + guidance=guidance, + encoder_hidden_states_mask=prompt_embeds_mask, + encoder_hidden_states=prompt_embeds, + img_shapes=img_shapes, + txt_seq_lens=txt_seq_lens, + attention_kwargs=self.attention_kwargs, + return_dict=False, + )[0] + noise_pred = noise_pred[:, : latents.size(1)] + + if do_true_cfg: + with self.transformer.cache_context("uncond"): + neg_noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep / 1000, + guidance=guidance, + encoder_hidden_states_mask=negative_prompt_embeds_mask, + encoder_hidden_states=negative_prompt_embeds, + img_shapes=img_shapes, + txt_seq_lens=negative_txt_seq_lens, + attention_kwargs=self.attention_kwargs, + return_dict=False, + )[0] + neg_noise_pred = neg_noise_pred[:, : latents.size(1)] + comb_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred) + + cond_norm = torch.norm(noise_pred, dim=-1, keepdim=True) + noise_norm = torch.norm(comb_pred, dim=-1, keepdim=True) + noise_pred = comb_pred * (cond_norm / noise_norm) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + # for 64 channel transformer only. + init_latents_proper = image_latents + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.scale_noise( + init_latents_proper, torch.tensor([noise_timestep]), noise + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + self._current_timestep = None + if output_type == "latent": + image = latents + else: + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = latents.to(self.vae.dtype) + latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, self.vae.config.z_dim, 1, 1, 1) + .to(latents.device, latents.dtype) + ) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( + latents.device, latents.dtype + ) + latents = latents / latents_std + latents_mean + image = self.vae.decode(latents, return_dict=False)[0][:, :, 0] + image = self.image_processor.postprocess(image, output_type=output_type) + + if padding_mask_crop is not None: + image = [ + self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image + ] + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return QwenImagePipelineOutput(images=image) diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 22dfc5fcca..91eefc5c10 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -1772,6 +1772,21 @@ class QwenImageControlNetPipeline(metaclass=DummyObject): requires_backends(cls, ["torch", "transformers"]) +class QwenImageEditInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class QwenImageEditPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] From 901da9dccc6de15f7f4fafbacddc1d3533114f8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?apolin=C3=A1rio?= Date: Mon, 1 Sep 2025 09:54:38 +0100 Subject: [PATCH 38/74] Fix lora conversion function for ai-toolkit Qwen Image LoRAs (#12261) * Fix lora conversion function for ai-toolkit Qwen Image LoRAs * add forgotten parenthesis * remove space new line * update pipeline * detect if arrow or letter * remove whitespaces * style * apply suggestion * apply suggestion * apply suggestion --------- Co-authored-by: Sayak Paul --- .../loaders/lora_conversion_utils.py | 59 ++++++++++++------- src/diffusers/loaders/lora_pipeline.py | 3 +- 2 files changed, 41 insertions(+), 21 deletions(-) diff --git a/src/diffusers/loaders/lora_conversion_utils.py b/src/diffusers/loaders/lora_conversion_utils.py index d1692bd61b..6f584a5f0e 100644 --- a/src/diffusers/loaders/lora_conversion_utils.py +++ b/src/diffusers/loaders/lora_conversion_utils.py @@ -2129,6 
+2129,10 @@ def _convert_non_diffusers_ltxv_lora_to_diffusers(state_dict, non_diffusers_pref def _convert_non_diffusers_qwen_lora_to_diffusers(state_dict): + has_diffusion_model = any(k.startswith("diffusion_model.") for k in state_dict) + if has_diffusion_model: + state_dict = {k.removeprefix("diffusion_model."): v for k, v in state_dict.items()} + has_lora_unet = any(k.startswith("lora_unet_") for k in state_dict) if has_lora_unet: state_dict = {k.removeprefix("lora_unet_"): v for k, v in state_dict.items()} @@ -2201,29 +2205,44 @@ def _convert_non_diffusers_qwen_lora_to_diffusers(state_dict): all_keys = list(state_dict.keys()) down_key = ".lora_down.weight" up_key = ".lora_up.weight" + a_key = ".lora_A.weight" + b_key = ".lora_B.weight" - def get_alpha_scales(down_weight, alpha_key): - rank = down_weight.shape[0] - alpha = state_dict.pop(alpha_key).item() - scale = alpha / rank # LoRA is scaled by 'alpha / rank' in forward pass, so we need to scale it back here - scale_down = scale - scale_up = 1.0 - while scale_down * 2 < scale_up: - scale_down *= 2 - scale_up /= 2 - return scale_down, scale_up + has_non_diffusers_lora_id = any(down_key in k or up_key in k for k in all_keys) + has_diffusers_lora_id = any(a_key in k or b_key in k for k in all_keys) - for k in all_keys: - if k.endswith(down_key): - diffusers_down_key = k.replace(down_key, ".lora_A.weight") - diffusers_up_key = k.replace(down_key, up_key).replace(up_key, ".lora_B.weight") - alpha_key = k.replace(down_key, ".alpha") + if has_non_diffusers_lora_id: - down_weight = state_dict.pop(k) - up_weight = state_dict.pop(k.replace(down_key, up_key)) - scale_down, scale_up = get_alpha_scales(down_weight, alpha_key) - converted_state_dict[diffusers_down_key] = down_weight * scale_down - converted_state_dict[diffusers_up_key] = up_weight * scale_up + def get_alpha_scales(down_weight, alpha_key): + rank = down_weight.shape[0] + alpha = state_dict.pop(alpha_key).item() + scale = alpha / rank # LoRA is scaled by 'alpha / rank' in forward pass, so we need to scale it back here + scale_down = scale + scale_up = 1.0 + while scale_down * 2 < scale_up: + scale_down *= 2 + scale_up /= 2 + return scale_down, scale_up + + for k in all_keys: + if k.endswith(down_key): + diffusers_down_key = k.replace(down_key, ".lora_A.weight") + diffusers_up_key = k.replace(down_key, up_key).replace(up_key, ".lora_B.weight") + alpha_key = k.replace(down_key, ".alpha") + + down_weight = state_dict.pop(k) + up_weight = state_dict.pop(k.replace(down_key, up_key)) + scale_down, scale_up = get_alpha_scales(down_weight, alpha_key) + converted_state_dict[diffusers_down_key] = down_weight * scale_down + converted_state_dict[diffusers_up_key] = up_weight * scale_up + + # Already in diffusers format (lora_A/lora_B), just pop + elif has_diffusers_lora_id: + for k in all_keys: + if a_key in k or b_key in k: + converted_state_dict[k] = state_dict.pop(k) + elif ".alpha" in k: + state_dict.pop(k) if len(state_dict) > 0: raise ValueError(f"`state_dict` should be empty at this point but has {state_dict.keys()=}") diff --git a/src/diffusers/loaders/lora_pipeline.py b/src/diffusers/loaders/lora_pipeline.py index 572ace472f..7e89066f1f 100644 --- a/src/diffusers/loaders/lora_pipeline.py +++ b/src/diffusers/loaders/lora_pipeline.py @@ -6684,7 +6684,8 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin): has_alphas_in_sd = any(k.endswith(".alpha") for k in state_dict) has_lora_unet = any(k.startswith("lora_unet_") for k in state_dict) - if has_alphas_in_sd or has_lora_unet: + 
has_diffusion_model = any(k.startswith("diffusion_model.") for k in state_dict) + if has_alphas_in_sd or has_lora_unet or has_diffusion_model: state_dict = _convert_non_diffusers_qwen_lora_to_diffusers(state_dict) out = (state_dict, metadata) if return_lora_metadata else state_dict From 0ff1aa910cf3d87193af79ec1ae4487be542e872 Mon Sep 17 00:00:00 2001 From: Bulat Akhmatov <88503011+Brvcket@users.noreply.github.com> Date: Mon, 1 Sep 2025 14:12:14 +0300 Subject: [PATCH 39/74] [fix] fix for prior preservation and mixed precision sampling (#11873) Co-authored-by: Linoy Tsaban <57615435+linoytsaban@users.noreply.github.com> Co-authored-by: Sayak Paul --- examples/dreambooth/train_dreambooth_lora_flux_kontext.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/examples/dreambooth/train_dreambooth_lora_flux_kontext.py b/examples/dreambooth/train_dreambooth_lora_flux_kontext.py index ffeef7b4b3..87e0d2c29e 100644 --- a/examples/dreambooth/train_dreambooth_lora_flux_kontext.py +++ b/examples/dreambooth/train_dreambooth_lora_flux_kontext.py @@ -1270,6 +1270,7 @@ def main(args): subfolder="transformer", revision=args.revision, variant=args.variant, + torch_dtype=torch_dtype, ) pipeline = FluxKontextPipeline.from_pretrained( args.pretrained_model_name_or_path, @@ -1292,7 +1293,8 @@ def main(args): for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): - images = pipeline(example["prompt"]).images + with torch.autocast(device_type=accelerator.device.type, dtype=torch_dtype): + images = pipeline(prompt=example["prompt"]).images for i, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() @@ -1899,6 +1901,10 @@ def main(args): device=accelerator.device, prompt=args.instance_prompt, ) + else: + prompt_embeds, pooled_prompt_embeds, text_ids = compute_text_embeddings( + prompts, text_encoders, tokenizers + ) # Convert images to latent space if args.cache_latents: From 9e4a75b1420769f890adb5a9a112d16031fb3530 Mon Sep 17 00:00:00 2001 From: Ziheng Zhang Date: Tue, 2 Sep 2025 10:34:16 +0800 Subject: [PATCH 40/74] [docs] Fix VAE scale factor calculation in distributed inference docs (#12259) docs: Fix VAE scale factor calculation --- docs/source/en/training/distributed_inference.md | 2 +- docs/source/zh/training/distributed_inference.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/training/distributed_inference.md b/docs/source/en/training/distributed_inference.md index 64b1ea9f04..a536703f5b 100644 --- a/docs/source/en/training/distributed_inference.md +++ b/docs/source/en/training/distributed_inference.md @@ -223,7 +223,7 @@ from diffusers.image_processor import VaeImageProcessor import torch vae = AutoencoderKL.from_pretrained(ckpt_id, subfolder="vae", torch_dtype=torch.bfloat16).to("cuda") -vae_scale_factor = 2 ** (len(vae.config.block_out_channels)) +vae_scale_factor = 2 ** (len(vae.config.block_out_channels) - 1) image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor) with torch.no_grad(): diff --git a/docs/source/zh/training/distributed_inference.md b/docs/source/zh/training/distributed_inference.md index ec35b5e730..e0537735b2 100644 --- a/docs/source/zh/training/distributed_inference.md +++ b/docs/source/zh/training/distributed_inference.md @@ -223,7 +223,7 @@ from diffusers.image_processor import VaeImageProcessor import torch vae = AutoencoderKL.from_pretrained(ckpt_id, subfolder="vae", 
torch_dtype=torch.bfloat16).to("cuda") -vae_scale_factor = 2 ** (len(vae.config.block_out_channels)) +vae_scale_factor = 2 ** (len(vae.config.block_out_channels) - 1) image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor) with torch.no_grad(): From 006d09275122d6c38ecd6e22dfe23630f44095f5 Mon Sep 17 00:00:00 2001 From: Linoy Tsaban <57615435+linoytsaban@users.noreply.github.com> Date: Tue, 2 Sep 2025 11:30:33 +0300 Subject: [PATCH 41/74] [Flux LoRA] fix for prior preservation and mixed precision sampling, follow up on #11873 (#12264) * propagate fixes from https://github.com/huggingface/diffusers/pull/11873/ to flux script * propagate fixes from https://github.com/huggingface/diffusers/pull/11873/ to flux script * propagate fixes from https://github.com/huggingface/diffusers/pull/11873/ to flux script * Apply style fixes --------- Co-authored-by: github-actions[bot] --- .../train_dreambooth_lora_flux_advanced.py | 4 +++- examples/dreambooth/train_dreambooth_lora_flux.py | 11 ++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py index 951b989d7a..a46490e8b3 100644 --- a/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py +++ b/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py @@ -1399,6 +1399,7 @@ def main(args): torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 + pipeline = FluxPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, @@ -1419,7 +1420,8 @@ def main(args): for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): - images = pipeline(example["prompt"]).images + with torch.autocast(device_type=accelerator.device.type, dtype=torch_dtype): + images = pipeline(prompt=example["prompt"]).images for i, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() diff --git a/examples/dreambooth/train_dreambooth_lora_flux.py b/examples/dreambooth/train_dreambooth_lora_flux.py index 2353625c38..bd3a974a17 100644 --- a/examples/dreambooth/train_dreambooth_lora_flux.py +++ b/examples/dreambooth/train_dreambooth_lora_flux.py @@ -1131,6 +1131,7 @@ def main(args): torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 + pipeline = FluxPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, @@ -1151,7 +1152,8 @@ def main(args): for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): - images = pipeline(example["prompt"]).images + with torch.autocast(device_type=accelerator.device.type, dtype=torch_dtype): + images = pipeline(prompt=example["prompt"]).images for i, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() @@ -1159,8 +1161,7 @@ def main(args): image.save(image_filename) del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() + free_memory() # Handle the repository creation if accelerator.is_main_process: @@ -1728,6 +1729,10 @@ def main(args): device=accelerator.device, prompt=args.instance_prompt, ) + else: + prompt_embeds, pooled_prompt_embeds, text_ids = compute_text_embeddings( + prompts, text_encoders, tokenizers + ) # Convert images to latent space if 
args.cache_latents: From bcd4d77ba61c9ec295d0649448f108866b766708 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Wed, 3 Sep 2025 04:59:31 +0200 Subject: [PATCH 42/74] [CI] Remove big accelerator requirements from Quanto Tests (#12266) update --- tests/quantization/quanto/test_quanto.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/quantization/quanto/test_quanto.py b/tests/quantization/quanto/test_quanto.py index 28555a6076..e3463f136f 100644 --- a/tests/quantization/quanto/test_quanto.py +++ b/tests/quantization/quanto/test_quanto.py @@ -13,7 +13,7 @@ from ...testing_utils import ( nightly, numpy_cosine_similarity_distance, require_accelerate, - require_big_accelerator, + require_accelerator, require_torch_cuda_compatibility, torch_device, ) @@ -31,7 +31,7 @@ enable_full_determinism() @nightly -@require_big_accelerator +@require_accelerator @require_accelerate class QuantoBaseTesterMixin: model_id = None From 130fd8df54f24ffb006d84787b598d8adc899f23 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 3 Sep 2025 08:48:07 +0530 Subject: [PATCH 43/74] [core] use `kernels` to support `_flash_3_hub` attention backend (#12236) * feat: try loading fa3 using kernels when available. * up * change to Hub. * up * up * up * switch env var. * up * up * up * up * up * up --- src/diffusers/models/attention_dispatch.py | 65 +++++++++++++++++++++- src/diffusers/utils/constants.py | 1 + src/diffusers/utils/kernels_utils.py | 23 ++++++++ 3 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 src/diffusers/utils/kernels_utils.py diff --git a/src/diffusers/models/attention_dispatch.py b/src/diffusers/models/attention_dispatch.py index e123f4c193..f71be7c8ec 100644 --- a/src/diffusers/models/attention_dispatch.py +++ b/src/diffusers/models/attention_dispatch.py @@ -26,6 +26,7 @@ from ..utils import ( is_flash_attn_3_available, is_flash_attn_available, is_flash_attn_version, + is_kernels_available, is_sageattention_available, is_sageattention_version, is_torch_npu_available, @@ -35,7 +36,7 @@ from ..utils import ( is_xformers_available, is_xformers_version, ) -from ..utils.constants import DIFFUSERS_ATTN_BACKEND, DIFFUSERS_ATTN_CHECKS +from ..utils.constants import DIFFUSERS_ATTN_BACKEND, DIFFUSERS_ATTN_CHECKS, DIFFUSERS_ENABLE_HUB_KERNELS _REQUIRED_FLASH_VERSION = "2.6.3" @@ -67,6 +68,17 @@ else: flash_attn_3_func = None flash_attn_3_varlen_func = None +if DIFFUSERS_ENABLE_HUB_KERNELS: + if not is_kernels_available(): + raise ImportError( + "To use FA3 kernel for your hardware from the Hub, the `kernels` library must be installed. Install with `pip install kernels`." + ) + from ..utils.kernels_utils import _get_fa3_from_hub + + flash_attn_interface_hub = _get_fa3_from_hub() + flash_attn_3_func_hub = flash_attn_interface_hub.flash_attn_func +else: + flash_attn_3_func_hub = None if _CAN_USE_SAGE_ATTN: from sageattention import ( @@ -153,6 +165,8 @@ class AttentionBackendName(str, Enum): FLASH_VARLEN = "flash_varlen" _FLASH_3 = "_flash_3" _FLASH_VARLEN_3 = "_flash_varlen_3" + _FLASH_3_HUB = "_flash_3_hub" + # _FLASH_VARLEN_3_HUB = "_flash_varlen_3_hub" # not supported yet. # PyTorch native FLEX = "flex" @@ -351,6 +365,17 @@ def _check_attention_backend_requirements(backend: AttentionBackendName) -> None f"Flash Attention 3 backend '{backend.value}' is not usable because of missing package or the version is too old. Please build FA3 beta release from source." 
) + # TODO: add support Hub variant of FA3 varlen later + elif backend in [AttentionBackendName._FLASH_3_HUB]: + if not DIFFUSERS_ENABLE_HUB_KERNELS: + raise RuntimeError( + f"Flash Attention 3 Hub backend '{backend.value}' is not usable because the `DIFFUSERS_ENABLE_HUB_KERNELS` env var isn't set. Please set it like `export DIFFUSERS_ENABLE_HUB_KERNELS=yes`." + ) + if not is_kernels_available(): + raise RuntimeError( + f"Flash Attention 3 Hub backend '{backend.value}' is not usable because the `kernels` package isn't available. Please install it with `pip install kernels`." + ) + elif backend in [ AttentionBackendName.SAGE, AttentionBackendName.SAGE_VARLEN, @@ -657,6 +682,44 @@ def _flash_attention_3( return (out, lse) if return_attn_probs else out +@_AttentionBackendRegistry.register( + AttentionBackendName._FLASH_3_HUB, + constraints=[_check_device, _check_qkv_dtype_bf16_or_fp16, _check_shape], +) +def _flash_attention_3_hub( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + scale: Optional[float] = None, + is_causal: bool = False, + window_size: Tuple[int, int] = (-1, -1), + softcap: float = 0.0, + deterministic: bool = False, + return_attn_probs: bool = False, +) -> torch.Tensor: + out = flash_attn_3_func_hub( + q=query, + k=key, + v=value, + softmax_scale=scale, + causal=is_causal, + qv=None, + q_descale=None, + k_descale=None, + v_descale=None, + window_size=window_size, + softcap=softcap, + num_splits=1, + pack_gqa=None, + deterministic=deterministic, + sm_margin=0, + return_attn_probs=return_attn_probs, + ) + # When `return_attn_probs` is True, the above returns a tuple of + # actual outputs and lse. + return (out[0], out[1]) if return_attn_probs else out + + @_AttentionBackendRegistry.register( AttentionBackendName._FLASH_VARLEN_3, constraints=[_check_device, _check_qkv_dtype_bf16_or_fp16, _check_shape], diff --git a/src/diffusers/utils/constants.py b/src/diffusers/utils/constants.py index d9867fb875..8b4d76f3cb 100644 --- a/src/diffusers/utils/constants.py +++ b/src/diffusers/utils/constants.py @@ -46,6 +46,7 @@ DIFFUSERS_ATTN_CHECKS = os.getenv("DIFFUSERS_ATTN_CHECKS", "0") in ENV_VARS_TRUE DEFAULT_HF_PARALLEL_LOADING_WORKERS = 8 HF_ENABLE_PARALLEL_LOADING = os.environ.get("HF_ENABLE_PARALLEL_LOADING", "").upper() in ENV_VARS_TRUE_VALUES DIFFUSERS_DISABLE_REMOTE_CODE = os.getenv("DIFFUSERS_DISABLE_REMOTE_CODE", "false").lower() in ENV_VARS_TRUE_VALUES +DIFFUSERS_ENABLE_HUB_KERNELS = os.environ.get("DIFFUSERS_ENABLE_HUB_KERNELS", "").upper() in ENV_VARS_TRUE_VALUES # Below should be `True` if the current version of `peft` and `transformers` are compatible with # PEFT backend. Will automatically fall back to PEFT backend if the correct versions of the libraries are diff --git a/src/diffusers/utils/kernels_utils.py b/src/diffusers/utils/kernels_utils.py new file mode 100644 index 0000000000..26d6e3972f --- /dev/null +++ b/src/diffusers/utils/kernels_utils.py @@ -0,0 +1,23 @@ +from ..utils import get_logger +from .import_utils import is_kernels_available + + +logger = get_logger(__name__) + + +_DEFAULT_HUB_ID_FA3 = "kernels-community/flash-attn3" + + +def _get_fa3_from_hub(): + if not is_kernels_available(): + return None + else: + from kernels import get_kernel + + try: + # TODO: temporary revision for now. Remove when merged upstream into `main`. 
+ flash_attn_3_hub = get_kernel(_DEFAULT_HUB_ID_FA3, revision="fake-ops-return-probs") + return flash_attn_3_hub + except Exception as e: + logger.error(f"An error occurred while fetching kernel '{_DEFAULT_HUB_ID_FA3}' from the Hub: {e}") + raise From 6549b04ec6e20b2a4afd92872a1a06a1fa2893d4 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Tue, 2 Sep 2025 21:06:26 -0700 Subject: [PATCH 44/74] [docs] AutoPipeline (#12160) * refresh * feedback * feedback * supported models * fix --- docs/source/en/tutorials/autopipeline.md | 140 +++++++---------------- 1 file changed, 42 insertions(+), 98 deletions(-) diff --git a/docs/source/en/tutorials/autopipeline.md b/docs/source/en/tutorials/autopipeline.md index 44bf00398f..f0aa298b23 100644 --- a/docs/source/en/tutorials/autopipeline.md +++ b/docs/source/en/tutorials/autopipeline.md @@ -12,112 +12,56 @@ specific language governing permissions and limitations under the License. # AutoPipeline -Diffusers provides many pipelines for basic tasks like generating images, videos, audio, and inpainting. On top of these, there are specialized pipelines for adapters and features like upscaling, super-resolution, and more. Different pipeline classes can even use the same checkpoint because they share the same pretrained model! With so many different pipelines, it can be overwhelming to know which pipeline class to use. +[AutoPipeline](../api/models/auto_model) is a *task-and-model* pipeline that automatically selects the correct pipeline subclass based on the task. It handles the complexity of loading different pipeline subclasses without needing to know the specific pipeline subclass name. -The [AutoPipeline](../api/pipelines/auto_pipeline) class is designed to simplify the variety of pipelines in Diffusers. It is a generic *task-first* pipeline that lets you focus on a task ([`AutoPipelineForText2Image`], [`AutoPipelineForImage2Image`], and [`AutoPipelineForInpainting`]) without needing to know the specific pipeline class. The [AutoPipeline](../api/pipelines/auto_pipeline) automatically detects the correct pipeline class to use. +This is unlike [`DiffusionPipeline`], a *model-only* pipeline that automatically selects the pipeline subclass based on the model. -For example, let's use the [dreamlike-art/dreamlike-photoreal-2.0](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0) checkpoint. - -Under the hood, [AutoPipeline](../api/pipelines/auto_pipeline): - -1. Detects a `"stable-diffusion"` class from the [model_index.json](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0/blob/main/model_index.json) file. -2. Depending on the task you're interested in, it loads the [`StableDiffusionPipeline`], [`StableDiffusionImg2ImgPipeline`], or [`StableDiffusionInpaintPipeline`]. Any parameter (`strength`, `num_inference_steps`, etc.) you would pass to these specific pipelines can also be passed to the [AutoPipeline](../api/pipelines/auto_pipeline). - - - +[`AutoPipelineForImage2Image`] returns a specific pipeline subclass, (for example, [`StableDiffusionXLImg2ImgPipeline`]), which can only be used for image-to-image tasks. 
```py -from diffusers import AutoPipelineForText2Image import torch - -pipe_txt2img = AutoPipelineForText2Image.from_pretrained( - "dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16, use_safetensors=True -).to("cuda") - -prompt = "cinematic photo of Godzilla eating sushi with a cat in a izakaya, 35mm photograph, film, professional, 4k, highly detailed" -generator = torch.Generator(device="cpu").manual_seed(37) -image = pipe_txt2img(prompt, generator=generator).images[0] -image -``` - -
- -
- -
- - -```py from diffusers import AutoPipelineForImage2Image -from diffusers.utils import load_image -import torch - -pipe_img2img = AutoPipelineForImage2Image.from_pretrained( - "dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16, use_safetensors=True -).to("cuda") - -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-text2img.png") - -prompt = "cinematic photo of Godzilla eating burgers with a cat in a fast food restaurant, 35mm photograph, film, professional, 4k, highly detailed" -generator = torch.Generator(device="cpu").manual_seed(53) -image = pipe_img2img(prompt, image=init_image, generator=generator).images[0] -image -``` - -Notice how the [dreamlike-art/dreamlike-photoreal-2.0](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0) checkpoint is used for both text-to-image and image-to-image tasks? To save memory and avoid loading the checkpoint twice, use the [`~DiffusionPipeline.from_pipe`] method. - -```py -pipe_img2img = AutoPipelineForImage2Image.from_pipe(pipe_txt2img).to("cuda") -image = pipeline(prompt, image=init_image, generator=generator).images[0] -image -``` - -You can learn more about the [`~DiffusionPipeline.from_pipe`] method in the [Reuse a pipeline](../using-diffusers/loading#reuse-a-pipeline) guide. - -
- -
- -
- - -```py -from diffusers import AutoPipelineForInpainting -from diffusers.utils import load_image -import torch - -pipeline = AutoPipelineForInpainting.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True -).to("cuda") - -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-img2img.png") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-mask.png") - -prompt = "cinematic photo of a owl, 35mm photograph, film, professional, 4k, highly detailed" -generator = torch.Generator(device="cpu").manual_seed(38) -image = pipeline(prompt, image=init_image, mask_image=mask_image, generator=generator, strength=0.4).images[0] -image -``` - -
- -
- -
-
- -## Unsupported checkpoints - -The [AutoPipeline](../api/pipelines/auto_pipeline) supports [Stable Diffusion](../api/pipelines/stable_diffusion/overview), [Stable Diffusion XL](../api/pipelines/stable_diffusion/stable_diffusion_xl), [ControlNet](../api/pipelines/controlnet), [Kandinsky 2.1](../api/pipelines/kandinsky.md), [Kandinsky 2.2](../api/pipelines/kandinsky_v22), and [DeepFloyd IF](../api/pipelines/deepfloyd_if) checkpoints. - -If you try to load an unsupported checkpoint, you'll get an error. - -```py -from diffusers import AutoPipelineForImage2Image -import torch pipeline = AutoPipelineForImage2Image.from_pretrained( - "openai/shap-e-img2img", torch_dtype=torch.float16, use_safetensors=True + "RunDiffusion/Juggernaut-XL-v9", torch_dtype=torch.bfloat16, device_map="cuda", +) +print(pipeline) +"StableDiffusionXLImg2ImgPipeline { + "_class_name": "StableDiffusionXLImg2ImgPipeline", + ... +" +``` + +Loading the same model with [`DiffusionPipeline`] returns the [`StableDiffusionXLPipeline`] subclass. It can be used for text-to-image, image-to-image, or inpainting tasks depending on the inputs. + +```py +import torch +from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained( + "RunDiffusion/Juggernaut-XL-v9", torch_dtype=torch.bfloat16, device_map="cuda", +) +print(pipeline) +"StableDiffusionXLPipeline { + "_class_name": "StableDiffusionXLPipeline", + ... +" +``` + +Check the [mappings](https://github.com/huggingface/diffusers/blob/130fd8df54f24ffb006d84787b598d8adc899f23/src/diffusers/pipelines/auto_pipeline.py#L114) to see whether a model is supported or not. + +Trying to load an unsupported model returns an error. + +```py +import torch +from diffusers import AutoPipelineForImage2Image + +pipeline = AutoPipelineForImage2Image.from_pretrained( + "openai/shap-e-img2img", torch_dtype=torch.float16, ) "ValueError: AutoPipeline can't find a pipeline linked to ShapEImg2ImgPipeline for None" ``` + +There are three types of [AutoPipeline](../api/models/auto_model) classes, [`AutoPipelineForText2Image`], [`AutoPipelineForImage2Image`] and [`AutoPipelineForInpainting`]. Each of these classes have a predefined mapping, linking a pipeline to their task-specific subclass. + +When [`~AutoPipelineForText2Image.from_pretrained`] is called, it extracts the class name from the `model_index.json` file and selects the appropriate pipeline subclass for the task based on the mapping. 
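+
+As an illustrative sketch (reusing the Juggernaut-XL checkpoint from the examples above), you can see the mapping in action by loading the same model with two different AutoPipeline classes and checking which subclass each one resolves to.
+
+```py
+import torch
+from diffusers import AutoPipelineForText2Image, AutoPipelineForInpainting
+
+# The same checkpoint resolves to a different task-specific subclass for each AutoPipeline class.
+# No device placement is needed here since we only inspect the resolved classes.
+text2img = AutoPipelineForText2Image.from_pretrained(
+ "RunDiffusion/Juggernaut-XL-v9", torch_dtype=torch.bfloat16,
+)
+inpaint = AutoPipelineForInpainting.from_pretrained(
+ "RunDiffusion/Juggernaut-XL-v9", torch_dtype=torch.bfloat16,
+)
+print(type(text2img).__name__, type(inpaint).__name__)
+# expected: StableDiffusionXLPipeline StableDiffusionXLInpaintPipeline
+```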
\ No newline at end of file From 4acbfbf13b300e25acbe2516db8fa13a640cad3a Mon Sep 17 00:00:00 2001 From: Ishan Modi <54568147+ishan-modi@users.noreply.github.com> Date: Wed, 3 Sep 2025 10:14:52 +0530 Subject: [PATCH 45/74] [Quantization] Add TRT-ModelOpt as a Backend (#11173) * initial commit * update * updates * update * update * update * update * update * update * addressed PR comments * update * addressed PR comments * update * update * update * update * update * update * updates * update * update * addressed PR comments * updates * code formatting * update * addressed PR comments * addressed PR comments * addressed PR comments * addressed PR comments * fix docs and dependencies * fixed dependency test --------- Co-authored-by: Sayak Paul --- .github/workflows/nightly_tests.yml | 3 + docs/source/en/_toctree.yml | 2 + docs/source/en/quantization/modelopt.md | 141 ++++++++ setup.py | 2 + src/diffusers/__init__.py | 21 ++ src/diffusers/dependency_versions_table.py | 1 + src/diffusers/quantizers/auto.py | 7 + src/diffusers/quantizers/modelopt/__init__.py | 1 + .../quantizers/modelopt/modelopt_quantizer.py | 190 +++++++++++ .../quantizers/quantization_config.py | 211 +++++++++++- src/diffusers/utils/__init__.py | 2 + .../utils/dummy_nvidia_modelopt_objects.py | 17 + src/diffusers/utils/import_utils.py | 20 ++ src/diffusers/utils/testing_utils.py | 13 + tests/others/test_dependencies.py | 2 + tests/quantization/modelopt/__init__.py | 0 tests/quantization/modelopt/test_modelopt.py | 306 ++++++++++++++++++ 17 files changed, 936 insertions(+), 3 deletions(-) create mode 100644 docs/source/en/quantization/modelopt.md create mode 100644 src/diffusers/quantizers/modelopt/__init__.py create mode 100644 src/diffusers/quantizers/modelopt/modelopt_quantizer.py create mode 100644 src/diffusers/utils/dummy_nvidia_modelopt_objects.py create mode 100644 tests/quantization/modelopt/__init__.py create mode 100644 tests/quantization/modelopt/test_modelopt.py diff --git a/.github/workflows/nightly_tests.yml b/.github/workflows/nightly_tests.yml index 9216564093..479e5503ee 100644 --- a/.github/workflows/nightly_tests.yml +++ b/.github/workflows/nightly_tests.yml @@ -340,6 +340,9 @@ jobs: - backend: "optimum_quanto" test_location: "quanto" additional_deps: [] + - backend: "nvidia_modelopt" + test_location: "modelopt" + additional_deps: [] runs-on: group: aws-g6e-xlarge-plus container: diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index a0ddf8f256..a97c82796f 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -188,6 +188,8 @@ title: torchao - local: quantization/quanto title: quanto + - local: quantization/modelopt + title: NVIDIA ModelOpt - title: Model accelerators and hardware isExpanded: false diff --git a/docs/source/en/quantization/modelopt.md b/docs/source/en/quantization/modelopt.md new file mode 100644 index 0000000000..06933d47c2 --- /dev/null +++ b/docs/source/en/quantization/modelopt.md @@ -0,0 +1,141 @@ + + +# NVIDIA ModelOpt + +[NVIDIA-ModelOpt](https://github.com/NVIDIA/TensorRT-Model-Optimizer) is a unified library of state-of-the-art model optimization techniques like quantization, pruning, distillation, speculative decoding, etc. It compresses deep learning models for downstream deployment frameworks like TensorRT-LLM or TensorRT to optimize inference speed. + +Before you begin, make sure you have nvidia_modelopt installed. 
+ +```bash +pip install -U "nvidia_modelopt[hf]" +``` + +Quantize a model by passing [`NVIDIAModelOptConfig`] to [`~ModelMixin.from_pretrained`] (you can also load pre-quantized models). This works for any model in any modality, as long as it supports loading with [Accelerate](https://hf.co/docs/accelerate/index) and contains `torch.nn.Linear` layers. + +The example below only quantizes the weights to FP8. + +```python +import torch +from diffusers import AutoModel, SanaPipeline, NVIDIAModelOptConfig + +model_id = "Efficient-Large-Model/Sana_600M_1024px_diffusers" +dtype = torch.bfloat16 + +quantization_config = NVIDIAModelOptConfig(quant_type="FP8", quant_method="modelopt") +transformer = AutoModel.from_pretrained( + model_id, + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=dtype, +) +pipe = SanaPipeline.from_pretrained( + model_id, + transformer=transformer, + torch_dtype=dtype, +) +pipe.to("cuda") + +print(f"Pipeline memory usage: {torch.cuda.max_memory_reserved() / 1024**3:.3f} GB") + +prompt = "A cat holding a sign that says hello world" +image = pipe( + prompt, num_inference_steps=50, guidance_scale=4.5, max_sequence_length=512 +).images[0] +image.save("output.png") +``` + +> **Note:** +> +> The quantization methods in NVIDIA-ModelOpt are designed to reduce the memory footprint of model weights using various QAT (Quantization-Aware Training) and PTQ (Post-Training Quantization) techniques while maintaining model performance. However, the actual performance gain during inference depends on the deployment framework (e.g., TRT-LLM, TensorRT) and the specific hardware configuration. +> +> More details can be found [here](https://github.com/NVIDIA/TensorRT-Model-Optimizer/tree/main/examples). + +## NVIDIAModelOptConfig + +The `NVIDIAModelOptConfig` class accepts three parameters: +- `quant_type`: A string value mentioning one of the quantization types below. +- `modules_to_not_convert`: A list of module full/partial module names for which quantization should not be performed. For example, to not perform any quantization of the [`SD3Transformer2DModel`]'s pos_embed projection blocks, one would specify: `modules_to_not_convert=["pos_embed.proj.weight"]`. +- `disable_conv_quantization`: A boolean value which when set to `True` disables quantization for all convolutional layers in the model. This is useful as channel and block quantization generally don't work well with convolutional layers (used with INT4, NF4, NVFP4). If you want to disable quantization for specific convolutional layers, use `modules_to_not_convert` instead. +- `algorithm`: The algorithm to use for determining scale, defaults to `"max"`. You can check modelopt documentation for more algorithms and details. +- `forward_loop`: The forward loop function to use for calibrating activation during quantization. If not provided, it relies on static scale values computed using the weights only. +- `kwargs`: A dict of keyword arguments to pass to the underlying quantization method which will be invoked based on `quant_type`. + +## Supported quantization types + +ModelOpt supports weight-only, channel and block quantization int8, fp8, int4, nf4, and nvfp4. The quantization methods are designed to reduce the memory footprint of the model weights while maintaining the performance of the model during inference. + +Weight-only quantization stores the model weights in a specific low-bit data type but performs computation with a higher-precision data type, like `bfloat16`. 
This lowers the memory requirements from model weights but retains the memory peaks for activation computation. + +The quantization methods supported are as follows: + +| **Quantization Type** | **Supported Schemes** | **Required Kwargs** | **Additional Notes** | +|-----------------------|-----------------------|---------------------|----------------------| +| **INT8** | `int8 weight only`, `int8 channel quantization`, `int8 block quantization` | `quant_type`, `quant_type + channel_quantize`, `quant_type + channel_quantize + block_quantize` | +| **FP8** | `fp8 weight only`, `fp8 channel quantization`, `fp8 block quantization` | `quant_type`, `quant_type + channel_quantize`, `quant_type + channel_quantize + block_quantize` | +| **INT4** | `int4 weight only`, `int4 block quantization` | `quant_type`, `quant_type + channel_quantize + block_quantize` | `channel_quantize = -1 is only supported for now`| +| **NF4** | `nf4 weight only`, `nf4 double block quantization` | `quant_type`, `quant_type + channel_quantize + block_quantize + scale_channel_quantize` + `scale_block_quantize` | `channel_quantize = -1 and scale_channel_quantize = -1 are only supported for now` | +| **NVFP4** | `nvfp4 weight only`, `nvfp4 block quantization` | `quant_type`, `quant_type + channel_quantize + block_quantize` | `channel_quantize = -1 is only supported for now`| + + +Refer to the [official modelopt documentation](https://nvidia.github.io/TensorRT-Model-Optimizer/) for a better understanding of the available quantization methods and the exhaustive list of configuration options available. + +## Serializing and Deserializing quantized models + +To serialize a quantized model in a given dtype, first load the model with the desired quantization dtype and then save it using the [`~ModelMixin.save_pretrained`] method. + +```python +import torch +from diffusers import AutoModel, NVIDIAModelOptConfig +from modelopt.torch.opt import enable_huggingface_checkpointing + +enable_huggingface_checkpointing() + +model_id = "Efficient-Large-Model/Sana_600M_1024px_diffusers" +quant_config_fp8 = {"quant_type": "FP8", "quant_method": "modelopt"} +quant_config_fp8 = NVIDIAModelOptConfig(**quant_config_fp8) +model = AutoModel.from_pretrained( + model_id, + subfolder="transformer", + quantization_config=quant_config_fp8, + torch_dtype=torch.bfloat16, +) +model.save_pretrained('path/to/sana_fp8', safe_serialization=False) +``` + +To load a serialized quantized model, use the [`~ModelMixin.from_pretrained`] method. 
+ +```python +import torch +from diffusers import AutoModel, NVIDIAModelOptConfig, SanaPipeline +from modelopt.torch.opt import enable_huggingface_checkpointing + +enable_huggingface_checkpointing() + +quantization_config = NVIDIAModelOptConfig(quant_type="FP8", quant_method="modelopt") +transformer = AutoModel.from_pretrained( + "path/to/sana_fp8", + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, +) +pipe = SanaPipeline.from_pretrained( + "Efficient-Large-Model/Sana_600M_1024px_diffusers", + transformer=transformer, + torch_dtype=torch.bfloat16, +) +pipe.to("cuda") +prompt = "A cat holding a sign that says hello world" +image = pipe( + prompt, num_inference_steps=50, guidance_scale=4.5, max_sequence_length=512 +).images[0] +image.save("output.png") +``` diff --git a/setup.py b/setup.py index 62d984d9b6..ba3ad8e2b3 100644 --- a/setup.py +++ b/setup.py @@ -132,6 +132,7 @@ _deps = [ "gguf>=0.10.0", "torchao>=0.7.0", "bitsandbytes>=0.43.3", + "nvidia_modelopt[hf]>=0.33.1", "regex!=2019.12.17", "requests", "tensorboard", @@ -244,6 +245,7 @@ extras["bitsandbytes"] = deps_list("bitsandbytes", "accelerate") extras["gguf"] = deps_list("gguf", "accelerate") extras["optimum_quanto"] = deps_list("optimum_quanto", "accelerate") extras["torchao"] = deps_list("torchao", "accelerate") +extras["nvidia_modelopt"] = deps_list("nvidia_modelopt[hf]") if os.name == "nt": # windows extras["flax"] = [] # jax is not supported on windows diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 762ae3846a..fa5dd6482c 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -13,6 +13,7 @@ from .utils import ( is_k_diffusion_available, is_librosa_available, is_note_seq_available, + is_nvidia_modelopt_available, is_onnx_available, is_opencv_available, is_optimum_quanto_available, @@ -111,6 +112,18 @@ except OptionalDependencyNotAvailable: else: _import_structure["quantizers.quantization_config"].append("QuantoConfig") +try: + if not is_torch_available() and not is_accelerate_available() and not is_nvidia_modelopt_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_nvidia_modelopt_objects + + _import_structure["utils.dummy_nvidia_modelopt_objects"] = [ + name for name in dir(dummy_nvidia_modelopt_objects) if not name.startswith("_") + ] +else: + _import_structure["quantizers.quantization_config"].append("NVIDIAModelOptConfig") + try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() @@ -795,6 +808,14 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: else: from .quantizers.quantization_config import QuantoConfig + try: + if not is_nvidia_modelopt_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_nvidia_modelopt_objects import * + else: + from .quantizers.quantization_config import NVIDIAModelOptConfig + try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() diff --git a/src/diffusers/dependency_versions_table.py b/src/diffusers/dependency_versions_table.py index a3832cf9b8..79dc4c50a0 100644 --- a/src/diffusers/dependency_versions_table.py +++ b/src/diffusers/dependency_versions_table.py @@ -39,6 +39,7 @@ deps = { "gguf": "gguf>=0.10.0", "torchao": "torchao>=0.7.0", "bitsandbytes": "bitsandbytes>=0.43.3", + "nvidia_modelopt[hf]": "nvidia_modelopt[hf]>=0.33.1", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", diff --git 
a/src/diffusers/quantizers/auto.py b/src/diffusers/quantizers/auto.py index ce214ae7bc..070bcd0b21 100644 --- a/src/diffusers/quantizers/auto.py +++ b/src/diffusers/quantizers/auto.py @@ -21,9 +21,11 @@ from typing import Dict, Optional, Union from .bitsandbytes import BnB4BitDiffusersQuantizer, BnB8BitDiffusersQuantizer from .gguf import GGUFQuantizer +from .modelopt import NVIDIAModelOptQuantizer from .quantization_config import ( BitsAndBytesConfig, GGUFQuantizationConfig, + NVIDIAModelOptConfig, QuantizationConfigMixin, QuantizationMethod, QuantoConfig, @@ -39,6 +41,7 @@ AUTO_QUANTIZER_MAPPING = { "gguf": GGUFQuantizer, "quanto": QuantoQuantizer, "torchao": TorchAoHfQuantizer, + "modelopt": NVIDIAModelOptQuantizer, } AUTO_QUANTIZATION_CONFIG_MAPPING = { @@ -47,6 +50,7 @@ AUTO_QUANTIZATION_CONFIG_MAPPING = { "gguf": GGUFQuantizationConfig, "quanto": QuantoConfig, "torchao": TorchAoConfig, + "modelopt": NVIDIAModelOptConfig, } @@ -137,6 +141,9 @@ class DiffusersAutoQuantizer: if isinstance(quantization_config, dict): quantization_config = cls.from_dict(quantization_config) + if isinstance(quantization_config, NVIDIAModelOptConfig): + quantization_config.check_model_patching() + if warning_msg != "": warnings.warn(warning_msg) diff --git a/src/diffusers/quantizers/modelopt/__init__.py b/src/diffusers/quantizers/modelopt/__init__.py new file mode 100644 index 0000000000..ae0951cb30 --- /dev/null +++ b/src/diffusers/quantizers/modelopt/__init__.py @@ -0,0 +1 @@ +from .modelopt_quantizer import NVIDIAModelOptQuantizer diff --git a/src/diffusers/quantizers/modelopt/modelopt_quantizer.py b/src/diffusers/quantizers/modelopt/modelopt_quantizer.py new file mode 100644 index 0000000000..534f752321 --- /dev/null +++ b/src/diffusers/quantizers/modelopt/modelopt_quantizer.py @@ -0,0 +1,190 @@ +from typing import TYPE_CHECKING, Any, Dict, List, Union + +from ...utils import ( + get_module_from_name, + is_accelerate_available, + is_nvidia_modelopt_available, + is_torch_available, + logging, +) +from ..base import DiffusersQuantizer + + +if TYPE_CHECKING: + from ...models.modeling_utils import ModelMixin + + +if is_torch_available(): + import torch + import torch.nn as nn + +if is_accelerate_available(): + from accelerate.utils import set_module_tensor_to_device + + +logger = logging.get_logger(__name__) + + +class NVIDIAModelOptQuantizer(DiffusersQuantizer): + r""" + Diffusers Quantizer for TensorRT Model Optimizer + """ + + use_keep_in_fp32_modules = True + requires_calibration = False + required_packages = ["nvidia_modelopt"] + + def __init__(self, quantization_config, **kwargs): + super().__init__(quantization_config, **kwargs) + + def validate_environment(self, *args, **kwargs): + if not is_nvidia_modelopt_available(): + raise ImportError( + "Loading an nvidia-modelopt quantized model requires nvidia-modelopt library (`pip install nvidia-modelopt`)" + ) + + self.offload = False + + device_map = kwargs.get("device_map", None) + if isinstance(device_map, dict): + if "cpu" in device_map.values() or "disk" in device_map.values(): + if self.pre_quantized: + raise ValueError( + "You are attempting to perform cpu/disk offload with a pre-quantized modelopt model " + "This is not supported yet. Please remove the CPU or disk device from the `device_map` argument." + ) + else: + self.offload = True + + def check_if_quantized_param( + self, + model: "ModelMixin", + param_value: "torch.Tensor", + param_name: str, + state_dict: Dict[str, Any], + **kwargs, + ): + # ModelOpt imports diffusers internally. 
This is here to prevent circular imports + from modelopt.torch.quantization.utils import is_quantized + + module, tensor_name = get_module_from_name(model, param_name) + if self.pre_quantized: + return True + elif is_quantized(module) and "weight" in tensor_name: + return True + return False + + def create_quantized_param( + self, + model: "ModelMixin", + param_value: "torch.Tensor", + param_name: str, + target_device: "torch.device", + *args, + **kwargs, + ): + """ + Create the quantized parameter by calling .calibrate() after setting it to the module. + """ + # ModelOpt imports diffusers internally. This is here to prevent circular imports + import modelopt.torch.quantization as mtq + + dtype = kwargs.get("dtype", torch.float32) + module, tensor_name = get_module_from_name(model, param_name) + if self.pre_quantized: + module._parameters[tensor_name] = torch.nn.Parameter(param_value.to(device=target_device)) + else: + set_module_tensor_to_device(model, param_name, target_device, param_value, dtype) + mtq.calibrate( + module, self.quantization_config.modelopt_config["algorithm"], self.quantization_config.forward_loop + ) + mtq.compress(module) + module.weight.requires_grad = False + + def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: + max_memory = {key: val * 0.90 for key, val in max_memory.items()} + return max_memory + + def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": + if self.quantization_config.quant_type == "FP8": + target_dtype = torch.float8_e4m3fn + return target_dtype + + def update_torch_dtype(self, torch_dtype: "torch.dtype" = None) -> "torch.dtype": + if torch_dtype is None: + logger.info("You did not specify `torch_dtype` in `from_pretrained`. Setting it to `torch.float32`.") + torch_dtype = torch.float32 + return torch_dtype + + def get_conv_param_names(self, model: "ModelMixin") -> List[str]: + """ + Get parameter names for all convolutional layers in a HuggingFace ModelMixin. Includes Conv1d/2d/3d and + ConvTranspose1d/2d/3d. + """ + conv_types = ( + nn.Conv1d, + nn.Conv2d, + nn.Conv3d, + nn.ConvTranspose1d, + nn.ConvTranspose2d, + nn.ConvTranspose3d, + ) + + conv_param_names = [] + for name, module in model.named_modules(): + if isinstance(module, conv_types): + for param_name, _ in module.named_parameters(recurse=False): + conv_param_names.append(f"{name}.{param_name}") + + return conv_param_names + + def _process_model_before_weight_loading( + self, + model: "ModelMixin", + device_map, + keep_in_fp32_modules: List[str] = [], + **kwargs, + ): + # ModelOpt imports diffusers internally. 
This is here to prevent circular imports + import modelopt.torch.opt as mto + + if self.pre_quantized: + return + + modules_to_not_convert = self.quantization_config.modules_to_not_convert + + if modules_to_not_convert is None: + modules_to_not_convert = [] + if isinstance(modules_to_not_convert, str): + modules_to_not_convert = [modules_to_not_convert] + modules_to_not_convert.extend(keep_in_fp32_modules) + if self.quantization_config.disable_conv_quantization: + modules_to_not_convert.extend(self.get_conv_param_names(model)) + + for module in modules_to_not_convert: + self.quantization_config.modelopt_config["quant_cfg"]["*" + module + "*"] = {"enable": False} + self.quantization_config.modules_to_not_convert = modules_to_not_convert + mto.apply_mode(model, mode=[("quantize", self.quantization_config.modelopt_config)]) + model.config.quantization_config = self.quantization_config + + def _process_model_after_weight_loading(self, model, **kwargs): + # ModelOpt imports diffusers internally. This is here to prevent circular imports + from modelopt.torch.opt import ModeloptStateManager + + if self.pre_quantized: + return model + + for _, m in model.named_modules(): + if hasattr(m, ModeloptStateManager._state_key) and m is not model: + ModeloptStateManager.remove_state(m) + + return model + + @property + def is_trainable(self): + return True + + @property + def is_serializable(self): + self.quantization_config.check_model_patching(operation="saving") + return True diff --git a/src/diffusers/quantizers/quantization_config.py b/src/diffusers/quantizers/quantization_config.py index 871faf076e..bf85795651 100644 --- a/src/diffusers/quantizers/quantization_config.py +++ b/src/diffusers/quantizers/quantization_config.py @@ -25,10 +25,11 @@ import importlib.metadata import inspect import json import os +import warnings from dataclasses import dataclass from enum import Enum from functools import partial -from typing import Any, Dict, List, Optional, Union +from typing import Any, Callable, Dict, List, Optional, Union from packaging import version @@ -46,6 +47,7 @@ class QuantizationMethod(str, Enum): GGUF = "gguf" TORCHAO = "torchao" QUANTO = "quanto" + MODELOPT = "modelopt" if is_torchao_available(): @@ -268,7 +270,14 @@ class BitsAndBytesConfig(QuantizationConfigMixin): if bnb_4bit_quant_storage is None: self.bnb_4bit_quant_storage = torch.uint8 elif isinstance(bnb_4bit_quant_storage, str): - if bnb_4bit_quant_storage not in ["float16", "float32", "int8", "uint8", "float64", "bfloat16"]: + if bnb_4bit_quant_storage not in [ + "float16", + "float32", + "int8", + "uint8", + "float64", + "bfloat16", + ]: raise ValueError( "`bnb_4bit_quant_storage` must be a valid string (one of 'float16', 'float32', 'int8', 'uint8', 'float64', 'bfloat16') " ) @@ -479,7 +488,12 @@ class TorchAoConfig(QuantizationConfigMixin): ``` """ - def __init__(self, quant_type: str, modules_to_not_convert: Optional[List[str]] = None, **kwargs) -> None: + def __init__( + self, + quant_type: str, + modules_to_not_convert: Optional[List[str]] = None, + **kwargs, + ) -> None: self.quant_method = QuantizationMethod.TORCHAO self.quant_type = quant_type self.modules_to_not_convert = modules_to_not_convert @@ -724,3 +738,194 @@ class QuantoConfig(QuantizationConfigMixin): accepted_weights = ["float8", "int8", "int4", "int2"] if self.weights_dtype not in accepted_weights: raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights_dtype}") + + +@dataclass +class NVIDIAModelOptConfig(QuantizationConfigMixin): + 
"""This is a config class to use nvidia modelopt for quantization. + + Args: + quant_type (`str`): + The type of quantization we want to use, following is how to use: + **weightquant_activationquant ==> FP8_FP8** In the above example we have use FP8 for both weight and + activation quantization. Following are the all the options: + - FP8 + - INT8 + - INT4 + - NF4 + - NVFP4 + modules_to_not_convert (`List[str]`, *optional*, default to `None`): + The list of modules to not quantize, useful for quantizing models that explicitly require to have some + weight_only (`bool`, *optional*, default to `False`): + If set to `True`, the quantization will be applied only to the weights of the model. + channel_quantize (`int`, *optional*, default to `None`): + The channel quantization axis, useful for quantizing models across different axes. + block_quantize (`int`, *optional*, default to `None`): + The block size, useful to further quantize each channel/axes into blocks. + scale_channel_quantize (`int`, *optional*, default to `None`): + The scale channel quantization axis, useful for quantizing calculated scale across different axes. + scale_block_quantize (`int`, *optional*, default to `None`): + The scale block size, useful for quantizing each scale channel/axes into blocks. + algorithm (`str`, *optional*, default to `"max"`): + The algorithm to use for quantization, currently only supports `"max"`. + forward_loop (`Callable`, *optional*, default to `None`): + The forward loop function to use for calibration during quantization. + modelopt_config (`dict`, *optional*, default to `None`): + The modelopt config, useful for passing custom configs to modelopt. + disable_conv_quantization (`bool`, *optional*, default to `False`): + If set to `True`, the quantization will be disabled for convolutional layers. + kwargs (`Dict[str, Any]`, *optional*): + Additional parameters which are to be used for calibration. + """ + + quanttype_to_numbits = { + "FP8": (4, 3), + "INT8": 8, + "INT4": 4, + "NF4": 4, + "NVFP4": (2, 1), + } + quanttype_to_scalingbits = { + "NF4": 8, + "NVFP4": (4, 3), + } + + def __init__( + self, + quant_type: str, + modules_to_not_convert: Optional[List[str]] = None, + weight_only: bool = True, + channel_quantize: Optional[int] = None, + block_quantize: Optional[int] = None, + scale_channel_quantize: Optional[int] = None, + scale_block_quantize: Optional[int] = None, + algorithm: str = "max", + forward_loop: Optional[Callable] = None, + modelopt_config: Optional[dict] = None, + disable_conv_quantization: bool = False, + **kwargs, + ) -> None: + self.quant_method = QuantizationMethod.MODELOPT + self._normalize_quant_type(quant_type) + self.modules_to_not_convert = modules_to_not_convert + self.weight_only = weight_only + self.channel_quantize = channel_quantize + self.block_quantize = block_quantize + self.calib_cfg = { + "method": algorithm, + # add more options here if needed + } + self.forward_loop = forward_loop + self.scale_channel_quantize = scale_channel_quantize + self.scale_block_quantize = scale_block_quantize + self.modelopt_config = self.get_config_from_quant_type() if not modelopt_config else modelopt_config + self.disable_conv_quantization = disable_conv_quantization + + def check_model_patching(self, operation: str = "loading"): + # ModelOpt imports diffusers internally. This is here to prevent circular imports + from modelopt.torch.opt.plugins.huggingface import _PATCHED_CLASSES + + if len(_PATCHED_CLASSES) == 0: + warning_msg = ( + f"Not {operation} weights in modelopt format. 
This might cause unreliable behavior." + "Please make sure to run the following code before loading/saving model weights:\n\n" + " from modelopt.torch.opt import enable_huggingface_checkpointing\n" + " enable_huggingface_checkpointing()\n" + ) + warnings.warn(warning_msg) + + def _normalize_quant_type(self, quant_type: str) -> str: + """ + Validates and normalizes the quantization type string. + + Splits the quant_type into weight and activation components, verifies them against supported types, and + replaces unsupported values with safe defaults. + + Args: + quant_type (str): The input quantization type string (e.g., 'FP8_INT8'). + + Returns: + str: A valid quantization type string (e.g., 'FP8_INT8' or 'FP8'). + """ + parts = quant_type.split("_") + w_type = parts[0] + act_type = parts[1] if len(parts) > 1 else None + if len(parts) > 2: + logger.warning(f"Quantization type {quant_type} is not supported. Picking FP8_INT8 as default") + w_type = "FP8" + act_type = None + else: + if w_type not in NVIDIAModelOptConfig.quanttype_to_numbits: + logger.warning(f"Weight Quantization type {w_type} is not supported. Picking FP8 as default") + w_type = "FP8" + if act_type is not None and act_type not in NVIDIAModelOptConfig.quanttype_to_numbits: + logger.warning(f"Activation Quantization type {act_type} is not supported. Picking INT8 as default") + act_type = None + self.quant_type = w_type + ("_" + act_type if act_type is not None else "") + + def get_config_from_quant_type(self) -> Dict[str, Any]: + """ + Get the config from the quantization type. + """ + import modelopt.torch.quantization as mtq + + BASE_CONFIG = { + "quant_cfg": { + "*weight_quantizer": {"fake_quant": False}, + "*input_quantizer": {}, + "*output_quantizer": {"enable": False}, + "*q_bmm_quantizer": {}, + "*k_bmm_quantizer": {}, + "*v_bmm_quantizer": {}, + "*softmax_quantizer": {}, + **mtq.config._default_disabled_quantizer_cfg, + }, + "algorithm": self.calib_cfg, + } + + quant_cfg = BASE_CONFIG["quant_cfg"] + if self.weight_only: + for k in quant_cfg: + if "*weight_quantizer" not in k and not quant_cfg[k]: + quant_cfg[k]["enable"] = False + + parts = self.quant_type.split("_") + w_type = parts[0] + act_type = parts[1].replace("A", "") if len(parts) > 1 else None + for k in quant_cfg: + if k not in mtq.config._default_disabled_quantizer_cfg and "enable" not in quant_cfg[k]: + if k == "*input_quantizer": + if act_type is not None: + quant_cfg[k]["num_bits"] = NVIDIAModelOptConfig.quanttype_to_numbits[act_type] + continue + quant_cfg[k]["num_bits"] = NVIDIAModelOptConfig.quanttype_to_numbits[w_type] + + if self.block_quantize is not None and self.channel_quantize is not None: + quant_cfg["*weight_quantizer"]["block_sizes"] = {self.channel_quantize: self.block_quantize} + quant_cfg["*input_quantizer"]["block_sizes"] = { + self.channel_quantize: self.block_quantize, + "type": "dynamic", + } + elif self.channel_quantize is not None: + quant_cfg["*weight_quantizer"]["axis"] = self.channel_quantize + quant_cfg["*input_quantizer"]["axis"] = self.channel_quantize + quant_cfg["*input_quantizer"]["type"] = "dynamic" + + # Only fixed scaling sizes are supported for now in modelopt + if self.scale_channel_quantize is not None and self.scale_block_quantize is not None: + if w_type in NVIDIAModelOptConfig.quanttype_to_scalingbits: + quant_cfg["*weight_quantizer"]["block_sizes"].update( + { + "scale_bits": NVIDIAModelOptConfig.quanttype_to_scalingbits[w_type], + "scale_block_sizes": {self.scale_channel_quantize: self.scale_block_quantize}, + } + 
) + if act_type and act_type in NVIDIAModelOptConfig.quanttype_to_scalingbits: + quant_cfg["*input_quantizer"]["block_sizes"].update( + { + "scale_bits": NVIDIAModelOptConfig.quanttype_to_scalingbits[act_type], + "scale_block_sizes": {self.scale_channel_quantize: self.scale_block_quantize}, + } + ) + + return BASE_CONFIG diff --git a/src/diffusers/utils/__init__.py b/src/diffusers/utils/__init__.py index b27cf981ed..63932221b2 100644 --- a/src/diffusers/utils/__init__.py +++ b/src/diffusers/utils/__init__.py @@ -89,6 +89,8 @@ from .import_utils import ( is_matplotlib_available, is_nltk_available, is_note_seq_available, + is_nvidia_modelopt_available, + is_nvidia_modelopt_version, is_onnx_available, is_opencv_available, is_optimum_quanto_available, diff --git a/src/diffusers/utils/dummy_nvidia_modelopt_objects.py b/src/diffusers/utils/dummy_nvidia_modelopt_objects.py new file mode 100644 index 0000000000..046b28223b --- /dev/null +++ b/src/diffusers/utils/dummy_nvidia_modelopt_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class NVIDIAModelOptConfig(metaclass=DummyObject): + _backends = ["nvidia_modelopt"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["nvidia_modelopt"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["nvidia_modelopt"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["nvidia_modelopt"]) diff --git a/src/diffusers/utils/import_utils.py b/src/diffusers/utils/import_utils.py index 153be05738..9399ccd2a7 100644 --- a/src/diffusers/utils/import_utils.py +++ b/src/diffusers/utils/import_utils.py @@ -226,6 +226,7 @@ _sageattention_available, _sageattention_version = _is_package_available("sageat _flash_attn_available, _flash_attn_version = _is_package_available("flash_attn") _flash_attn_3_available, _flash_attn_3_version = _is_package_available("flash_attn_3") _kornia_available, _kornia_version = _is_package_available("kornia") +_nvidia_modelopt_available, _nvidia_modelopt_version = _is_package_available("modelopt", get_dist_name=True) def is_torch_available(): @@ -364,6 +365,10 @@ def is_optimum_quanto_available(): return _optimum_quanto_available +def is_nvidia_modelopt_available(): + return _nvidia_modelopt_available + + def is_timm_available(): return _timm_available @@ -830,6 +835,21 @@ def is_optimum_quanto_version(operation: str, version: str): return compare_versions(parse(_optimum_quanto_version), operation, version) +def is_nvidia_modelopt_version(operation: str, version: str): + """ + Compares the current Nvidia ModelOpt version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _nvidia_modelopt_available: + return False + return compare_versions(parse(_nvidia_modelopt_version), operation, version) + + def is_xformers_version(operation: str, version: str): """ Compares the current xformers version to a given reference with an operation. 
diff --git a/src/diffusers/utils/testing_utils.py b/src/diffusers/utils/testing_utils.py index 6d6a7d6ce4..3297bb5fdc 100644 --- a/src/diffusers/utils/testing_utils.py +++ b/src/diffusers/utils/testing_utils.py @@ -38,6 +38,7 @@ from .import_utils import ( is_gguf_available, is_kernels_available, is_note_seq_available, + is_nvidia_modelopt_available, is_onnx_available, is_opencv_available, is_optimum_quanto_available, @@ -638,6 +639,18 @@ def require_torchao_version_greater_or_equal(torchao_version): return decorator +def require_modelopt_version_greater_or_equal(modelopt_version): + def decorator(test_case): + correct_nvidia_modelopt_version = is_nvidia_modelopt_available() and version.parse( + version.parse(importlib.metadata.version("modelopt")).base_version + ) >= version.parse(modelopt_version) + return unittest.skipUnless( + correct_nvidia_modelopt_version, f"Test requires modelopt with version greater than {modelopt_version}." + )(test_case) + + return decorator + + def require_kernels_version_greater_or_equal(kernels_version): def decorator(test_case): correct_kernels_version = is_kernels_available() and version.parse( diff --git a/tests/others/test_dependencies.py b/tests/others/test_dependencies.py index a08129a1e9..db22f10c4b 100644 --- a/tests/others/test_dependencies.py +++ b/tests/others/test_dependencies.py @@ -39,6 +39,8 @@ class DependencyTester(unittest.TestCase): backend = "invisible-watermark" elif backend == "opencv": backend = "opencv-python" + elif backend == "nvidia_modelopt": + backend = "nvidia_modelopt[hf]" assert backend in deps, f"{backend} is not in the deps table!" def test_pipeline_imports(self): diff --git a/tests/quantization/modelopt/__init__.py b/tests/quantization/modelopt/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/quantization/modelopt/test_modelopt.py b/tests/quantization/modelopt/test_modelopt.py new file mode 100644 index 0000000000..6b0624a280 --- /dev/null +++ b/tests/quantization/modelopt/test_modelopt.py @@ -0,0 +1,306 @@ +import gc +import tempfile +import unittest + +from diffusers import NVIDIAModelOptConfig, SD3Transformer2DModel, StableDiffusion3Pipeline +from diffusers.utils import is_nvidia_modelopt_available, is_torch_available +from diffusers.utils.testing_utils import ( + backend_empty_cache, + backend_reset_peak_memory_stats, + enable_full_determinism, + nightly, + numpy_cosine_similarity_distance, + require_accelerate, + require_big_accelerator, + require_modelopt_version_greater_or_equal, + require_torch_cuda_compatibility, + torch_device, +) + + +if is_nvidia_modelopt_available(): + import modelopt.torch.quantization as mtq + +if is_torch_available(): + import torch + + from ..utils import LoRALayer, get_memory_consumption_stat + +enable_full_determinism() + + +@nightly +@require_big_accelerator +@require_accelerate +@require_modelopt_version_greater_or_equal("0.33.1") +class ModelOptBaseTesterMixin: + model_id = "hf-internal-testing/tiny-sd3-pipe" + model_cls = SD3Transformer2DModel + pipeline_cls = StableDiffusion3Pipeline + torch_dtype = torch.bfloat16 + expected_memory_reduction = 0.0 + keep_in_fp32_module = "" + modules_to_not_convert = "" + _test_torch_compile = False + + def setUp(self): + backend_reset_peak_memory_stats(torch_device) + backend_empty_cache(torch_device) + gc.collect() + + def tearDown(self): + backend_reset_peak_memory_stats(torch_device) + backend_empty_cache(torch_device) + gc.collect() + + def get_dummy_init_kwargs(self): + return {"quant_type": "FP8"} + + def 
get_dummy_model_init_kwargs(self): + return { + "pretrained_model_name_or_path": self.model_id, + "torch_dtype": self.torch_dtype, + "quantization_config": NVIDIAModelOptConfig(**self.get_dummy_init_kwargs()), + "subfolder": "transformer", + } + + def test_modelopt_layers(self): + model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + for name, module in model.named_modules(): + if isinstance(module, torch.nn.Linear): + assert mtq.utils.is_quantized(module) + + def test_modelopt_memory_usage(self): + inputs = self.get_dummy_inputs() + inputs = { + k: v.to(device=torch_device, dtype=torch.bfloat16) for k, v in inputs.items() if not isinstance(v, bool) + } + + unquantized_model = self.model_cls.from_pretrained( + self.model_id, torch_dtype=self.torch_dtype, subfolder="transformer" + ) + unquantized_model.to(torch_device) + unquantized_model_memory = get_memory_consumption_stat(unquantized_model, inputs) + + quantized_model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + quantized_model.to(torch_device) + quantized_model_memory = get_memory_consumption_stat(quantized_model, inputs) + + assert unquantized_model_memory / quantized_model_memory >= self.expected_memory_reduction + + def test_keep_modules_in_fp32(self): + _keep_in_fp32_modules = self.model_cls._keep_in_fp32_modules + self.model_cls._keep_in_fp32_modules = self.keep_in_fp32_module + + model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + model.to(torch_device) + + for name, module in model.named_modules(): + if isinstance(module, torch.nn.Linear): + if name in model._keep_in_fp32_modules: + assert module.weight.dtype == torch.float32 + self.model_cls._keep_in_fp32_modules = _keep_in_fp32_modules + + def test_modules_to_not_convert(self): + init_kwargs = self.get_dummy_model_init_kwargs() + quantization_config_kwargs = self.get_dummy_init_kwargs() + quantization_config_kwargs.update({"modules_to_not_convert": self.modules_to_not_convert}) + quantization_config = NVIDIAModelOptConfig(**quantization_config_kwargs) + init_kwargs.update({"quantization_config": quantization_config}) + + model = self.model_cls.from_pretrained(**init_kwargs) + model.to(torch_device) + + for name, module in model.named_modules(): + if name in self.modules_to_not_convert: + assert not mtq.utils.is_quantized(module) + + def test_dtype_assignment(self): + model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + + with self.assertRaises(ValueError): + model.to(torch.float16) + + with self.assertRaises(ValueError): + device_0 = f"{torch_device}:0" + model.to(device=device_0, dtype=torch.float16) + + with self.assertRaises(ValueError): + model.float() + + with self.assertRaises(ValueError): + model.half() + + model.to(torch_device) + + def test_serialization(self): + model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + inputs = self.get_dummy_inputs() + + model.to(torch_device) + with torch.no_grad(): + model_output = model(**inputs) + + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir) + saved_model = self.model_cls.from_pretrained( + tmp_dir, + torch_dtype=torch.bfloat16, + ) + + saved_model.to(torch_device) + with torch.no_grad(): + saved_model_output = saved_model(**inputs) + + assert torch.allclose(model_output.sample, saved_model_output.sample, rtol=1e-5, atol=1e-5) + + def test_torch_compile(self): + if not self._test_torch_compile: + return + + model = 
self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + compiled_model = torch.compile(model, mode="max-autotune", fullgraph=True, dynamic=False) + + model.to(torch_device) + with torch.no_grad(): + model_output = model(**self.get_dummy_inputs()).sample + + compiled_model.to(torch_device) + with torch.no_grad(): + compiled_model_output = compiled_model(**self.get_dummy_inputs()).sample + + model_output = model_output.detach().float().cpu().numpy() + compiled_model_output = compiled_model_output.detach().float().cpu().numpy() + + max_diff = numpy_cosine_similarity_distance(model_output.flatten(), compiled_model_output.flatten()) + assert max_diff < 1e-3 + + def test_device_map_error(self): + with self.assertRaises(ValueError): + _ = self.model_cls.from_pretrained( + **self.get_dummy_model_init_kwargs(), + device_map={0: "8GB", "cpu": "16GB"}, + ) + + def get_dummy_inputs(self): + batch_size = 1 + seq_len = 16 + height = width = 32 + num_latent_channels = 4 + caption_channels = 8 + + torch.manual_seed(0) + hidden_states = torch.randn((batch_size, num_latent_channels, height, width)).to( + torch_device, dtype=torch.bfloat16 + ) + encoder_hidden_states = torch.randn((batch_size, seq_len, caption_channels)).to( + torch_device, dtype=torch.bfloat16 + ) + timestep = torch.tensor([1.0]).to(torch_device, dtype=torch.bfloat16).expand(batch_size) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + } + + def test_model_cpu_offload(self): + init_kwargs = self.get_dummy_init_kwargs() + transformer = self.model_cls.from_pretrained( + self.model_id, + quantization_config=NVIDIAModelOptConfig(**init_kwargs), + subfolder="transformer", + torch_dtype=torch.bfloat16, + ) + pipe = self.pipeline_cls.from_pretrained(self.model_id, transformer=transformer, torch_dtype=torch.bfloat16) + pipe.enable_model_cpu_offload(device=torch_device) + _ = pipe("a cat holding a sign that says hello", num_inference_steps=2) + + def test_training(self): + quantization_config = NVIDIAModelOptConfig(**self.get_dummy_init_kwargs()) + quantized_model = self.model_cls.from_pretrained( + self.model_id, + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + ).to(torch_device) + + for param in quantized_model.parameters(): + param.requires_grad = False + if param.ndim == 1: + param.data = param.data.to(torch.float32) + + for _, module in quantized_model.named_modules(): + if hasattr(module, "to_q"): + module.to_q = LoRALayer(module.to_q, rank=4) + if hasattr(module, "to_k"): + module.to_k = LoRALayer(module.to_k, rank=4) + if hasattr(module, "to_v"): + module.to_v = LoRALayer(module.to_v, rank=4) + + with torch.amp.autocast(str(torch_device), dtype=torch.bfloat16): + inputs = self.get_dummy_inputs() + output = quantized_model(**inputs)[0] + output.norm().backward() + + for module in quantized_model.modules(): + if isinstance(module, LoRALayer): + self.assertTrue(module.adapter[1].weight.grad is not None) + + +class SanaTransformerFP8WeightsTest(ModelOptBaseTesterMixin, unittest.TestCase): + expected_memory_reduction = 0.6 + + def get_dummy_init_kwargs(self): + return {"quant_type": "FP8"} + + +class SanaTransformerINT8WeightsTest(ModelOptBaseTesterMixin, unittest.TestCase): + expected_memory_reduction = 0.6 + _test_torch_compile = True + + def get_dummy_init_kwargs(self): + return {"quant_type": "INT8"} + + +@require_torch_cuda_compatibility(8.0) +class 
SanaTransformerINT4WeightsTest(ModelOptBaseTesterMixin, unittest.TestCase): + expected_memory_reduction = 0.55 + + def get_dummy_init_kwargs(self): + return { + "quant_type": "INT4", + "block_quantize": 128, + "channel_quantize": -1, + "disable_conv_quantization": True, + } + + +@require_torch_cuda_compatibility(8.0) +class SanaTransformerNF4WeightsTest(ModelOptBaseTesterMixin, unittest.TestCase): + expected_memory_reduction = 0.65 + + def get_dummy_init_kwargs(self): + return { + "quant_type": "NF4", + "block_quantize": 128, + "channel_quantize": -1, + "scale_block_quantize": 8, + "scale_channel_quantize": -1, + "modules_to_not_convert": ["conv"], + } + + +@require_torch_cuda_compatibility(8.0) +class SanaTransformerNVFP4WeightsTest(ModelOptBaseTesterMixin, unittest.TestCase): + expected_memory_reduction = 0.65 + + def get_dummy_init_kwargs(self): + return { + "quant_type": "NVFP4", + "block_quantize": 128, + "channel_quantize": -1, + "scale_block_quantize": 8, + "scale_channel_quantize": -1, + "modules_to_not_convert": ["conv"], + } From ffc8c0c1e1bba93213f3ed238ddc2897bf91fe8f Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 3 Sep 2025 11:15:27 +0530 Subject: [PATCH 46/74] [tests] feat: add AoT compilation tests (#12203) * feat: add a test for aot. * up --- tests/models/test_modeling_common.py | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py index 36eb2c1ef4..5e7be62342 100644 --- a/tests/models/test_modeling_common.py +++ b/tests/models/test_modeling_common.py @@ -2059,6 +2059,7 @@ class TorchCompileTesterMixin: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) + model.eval() model = torch.compile(model, fullgraph=True) with ( @@ -2076,6 +2077,7 @@ class TorchCompileTesterMixin: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) + model.eval() model.compile_repeated_blocks(fullgraph=True) recompile_limit = 1 @@ -2098,7 +2100,6 @@ class TorchCompileTesterMixin: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) - model.eval() # TODO: Can test for other group offloading kwargs later if needed. 
group_offload_kwargs = { @@ -2111,11 +2112,11 @@ class TorchCompileTesterMixin: } model.enable_group_offload(**group_offload_kwargs) model.compile() + with torch.no_grad(): _ = model(**inputs_dict) _ = model(**inputs_dict) - @require_torch_version_greater("2.7.1") def test_compile_on_different_shapes(self): if self.different_shapes_for_compilation is None: pytest.skip(f"Skipping as `different_shapes_for_compilation` is not set for {self.__class__.__name__}.") @@ -2123,6 +2124,7 @@ class TorchCompileTesterMixin: init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) + model.eval() model = torch.compile(model, fullgraph=True, dynamic=True) for height, width in self.different_shapes_for_compilation: @@ -2130,6 +2132,26 @@ class TorchCompileTesterMixin: inputs_dict = self.prepare_dummy_input(height=height, width=width) _ = model(**inputs_dict) + def test_compile_works_with_aot(self): + from torch._inductor.package import load_package + + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict).to(torch_device) + exported_model = torch.export.export(model, args=(), kwargs=inputs_dict) + + with tempfile.TemporaryDirectory() as tmpdir: + package_path = os.path.join(tmpdir, f"{self.model_class.__name__}.pt2") + _ = torch._inductor.aoti_compile_and_package(exported_model, package_path=package_path) + assert os.path.exists(package_path) + loaded_binary = load_package(package_path, run_single_threaded=True) + + model.forward = loaded_binary + + with torch.no_grad(): + _ = model(**inputs_dict) + _ = model(**inputs_dict) + @slow @require_torch_2 From 6682956333b0105433fa31ec319aada7ffff2924 Mon Sep 17 00:00:00 2001 From: Ju Hoon Park Date: Wed, 3 Sep 2025 18:35:41 +0900 Subject: [PATCH 47/74] Add AttentionMixin to WanVACETransformer3DModel (#12268) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add AttentionMixin to WanVACETransformer3DModel to enable methods like `set_attn_processor()`. * Import AttentionMixin in transformer_wan_vace.py Special thanks to @tolgacangoz 🙇‍♂️ --- src/diffusers/models/transformers/transformer_wan_vace.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/diffusers/models/transformers/transformer_wan_vace.py b/src/diffusers/models/transformers/transformer_wan_vace.py index e039d36219..e5a9c7e0a6 100644 --- a/src/diffusers/models/transformers/transformer_wan_vace.py +++ b/src/diffusers/models/transformers/transformer_wan_vace.py @@ -21,7 +21,7 @@ import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers -from ..attention import FeedForward +from ..attention import AttentionMixin, FeedForward from ..cache_utils import CacheMixin from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin @@ -134,7 +134,9 @@ class WanVACETransformerBlock(nn.Module): return conditioning_states, control_hidden_states -class WanVACETransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin): +class WanVACETransformer3DModel( + ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin +): r""" A Transformer model for video-like data used in the Wan model. 
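
A minimal sketch of what adding `AttentionMixin` to `WanVACETransformer3DModel` enables: the commit message cites `set_attn_processor()`, and the matching `attn_processors` property is assumed to come with the mixin. The checkpoint id below is an assumed example for illustration, not part of the patch.

```python
# Illustrative only: checkpoint id is an assumption, not taken from the patch.
from diffusers import WanVACETransformer3DModel

transformer = WanVACETransformer3DModel.from_pretrained(
    "Wan-AI/Wan2.1-VACE-1.3B-diffusers", subfolder="transformer"
)

# Inspect the per-layer attention processor mapping exposed by the mixin
processors = transformer.attn_processors
print(len(processors), next(iter(processors)))

# set_attn_processor() accepts a single processor or a full name-to-processor mapping;
# round-tripping the existing mapping is a no-op that exercises the new API.
transformer.set_attn_processor(processors)
```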
From 764b62473ac10afd9c52e6b3f3f528f719bc7a34 Mon Sep 17 00:00:00 2001 From: co63oc Date: Wed, 3 Sep 2025 23:58:24 +0800 Subject: [PATCH 48/74] fix some typos (#12265) Signed-off-by: co63oc --- .../geodiff/geodiff_molecule_conformation.ipynb | 2 +- .../multi_subject_dreambooth_inpainting/README.md | 2 +- src/diffusers/guiders/frequency_decoupled_guidance.py | 2 +- src/diffusers/hooks/faster_cache.py | 6 +++--- src/diffusers/hooks/pyramid_attention_broadcast.py | 8 ++++---- src/diffusers/modular_pipelines/flux/denoise.py | 2 +- src/diffusers/modular_pipelines/modular_pipeline.py | 4 ++-- src/diffusers/modular_pipelines/node_utils.py | 4 ++-- .../modular_pipelines/stable_diffusion_xl/denoise.py | 8 ++++---- src/diffusers/modular_pipelines/wan/denoise.py | 2 +- src/diffusers/pipelines/pipeline_loading_utils.py | 2 +- 11 files changed, 21 insertions(+), 21 deletions(-) diff --git a/examples/research_projects/geodiff/geodiff_molecule_conformation.ipynb b/examples/research_projects/geodiff/geodiff_molecule_conformation.ipynb index a39bcc5eea..3d5b8adfba 100644 --- a/examples/research_projects/geodiff/geodiff_molecule_conformation.ipynb +++ b/examples/research_projects/geodiff/geodiff_molecule_conformation.ipynb @@ -1760,7 +1760,7 @@ "clip_local = None\n", "clip_pos = None\n", "\n", - "# constands for data handling\n", + "# constants for data handling\n", "save_traj = False\n", "save_data = False\n", "output_dir = \"/content/\"" diff --git a/examples/research_projects/multi_subject_dreambooth_inpainting/README.md b/examples/research_projects/multi_subject_dreambooth_inpainting/README.md index 32c375efea..8ddef1b83c 100644 --- a/examples/research_projects/multi_subject_dreambooth_inpainting/README.md +++ b/examples/research_projects/multi_subject_dreambooth_inpainting/README.md @@ -2,7 +2,7 @@ Please note that this project is not actively maintained. However, you can open an issue and tag @gzguevara. -[DreamBooth](https://huggingface.co/papers/2208.12242) is a method to personalize text2image models like stable diffusion given just a few(3~5) images of a subject. This project consists of **two parts**. Training Stable Diffusion for inpainting requieres prompt-image-mask pairs. The Unet of inpainiting models have 5 additional input channels (4 for the encoded masked-image and 1 for the mask itself). +[DreamBooth](https://huggingface.co/papers/2208.12242) is a method to personalize text2image models like stable diffusion given just a few(3~5) images of a subject. This project consists of **two parts**. Training Stable Diffusion for inpainting requires prompt-image-mask pairs. The Unet of inpainiting models have 5 additional input channels (4 for the encoded masked-image and 1 for the mask itself). **The first part**, the `multi_inpaint_dataset.ipynb` notebook, demonstrates how make a 🤗 dataset of prompt-image-mask pairs. You can, however, skip the first part and move straight to the second part with the example datasets in this project. ([cat toy dataset masked](https://huggingface.co/datasets/gzguevara/cat_toy_masked), [mr. 
potato head dataset masked](https://huggingface.co/datasets/gzguevara/mr_potato_head_masked)) diff --git a/src/diffusers/guiders/frequency_decoupled_guidance.py b/src/diffusers/guiders/frequency_decoupled_guidance.py index 2bf2f430b1..93822a180e 100644 --- a/src/diffusers/guiders/frequency_decoupled_guidance.py +++ b/src/diffusers/guiders/frequency_decoupled_guidance.py @@ -61,7 +61,7 @@ def project(v0: torch.Tensor, v1: torch.Tensor, upcast_to_double: bool = True) - def build_image_from_pyramid(pyramid: List[torch.Tensor]) -> torch.Tensor: """ Recovers the data space latents from the Laplacian pyramid frequency space. Implementation from the paper - (Algorihtm 2). + (Algorithm 2). """ # pyramid shapes: [[B, C, H, W], [B, C, H/2, W/2], ...] img = pyramid[-1] diff --git a/src/diffusers/hooks/faster_cache.py b/src/diffusers/hooks/faster_cache.py index 53e5bd792c..a01afeffdb 100644 --- a/src/diffusers/hooks/faster_cache.py +++ b/src/diffusers/hooks/faster_cache.py @@ -54,11 +54,11 @@ class FasterCacheConfig: Attributes: spatial_attention_block_skip_range (`int`, defaults to `2`): Calculate the attention states every `N` iterations. If this is set to `N`, the attention computation will - be skipped `N - 1` times (i.e., cached attention states will be re-used) before computing the new attention + be skipped `N - 1` times (i.e., cached attention states will be reused) before computing the new attention states again. temporal_attention_block_skip_range (`int`, *optional*, defaults to `None`): Calculate the attention states every `N` iterations. If this is set to `N`, the attention computation will - be skipped `N - 1` times (i.e., cached attention states will be re-used) before computing the new attention + be skipped `N - 1` times (i.e., cached attention states will be reused) before computing the new attention states again. spatial_attention_timestep_skip_range (`Tuple[float, float]`, defaults to `(-1, 681)`): The timestep range within which the spatial attention computation can be skipped without a significant loss @@ -90,7 +90,7 @@ class FasterCacheConfig: from the conditional branch outputs. unconditional_batch_skip_range (`int`, defaults to `5`): Process the unconditional branch every `N` iterations. If this is set to `N`, the unconditional branch - computation will be skipped `N - 1` times (i.e., cached unconditional branch states will be re-used) before + computation will be skipped `N - 1` times (i.e., cached unconditional branch states will be reused) before computing the new unconditional branch states again. unconditional_batch_timestep_skip_range (`Tuple[float, float]`, defaults to `(-1, 641)`): The timestep range within which the unconditional branch computation can be skipped without a significant diff --git a/src/diffusers/hooks/pyramid_attention_broadcast.py b/src/diffusers/hooks/pyramid_attention_broadcast.py index ee3f410331..12d6aa0616 100644 --- a/src/diffusers/hooks/pyramid_attention_broadcast.py +++ b/src/diffusers/hooks/pyramid_attention_broadcast.py @@ -45,15 +45,15 @@ class PyramidAttentionBroadcastConfig: spatial_attention_block_skip_range (`int`, *optional*, defaults to `None`): The number of times a specific spatial attention broadcast is skipped before computing the attention states to re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times (i.e., - old attention states will be re-used) before computing the new attention states again. + old attention states will be reused) before computing the new attention states again. 
temporal_attention_block_skip_range (`int`, *optional*, defaults to `None`): The number of times a specific temporal attention broadcast is skipped before computing the attention states to re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times - (i.e., old attention states will be re-used) before computing the new attention states again. + (i.e., old attention states will be reused) before computing the new attention states again. cross_attention_block_skip_range (`int`, *optional*, defaults to `None`): The number of times a specific cross-attention broadcast is skipped before computing the attention states to re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times (i.e., - old attention states will be re-used) before computing the new attention states again. + old attention states will be reused) before computing the new attention states again. spatial_attention_timestep_skip_range (`Tuple[int, int]`, defaults to `(100, 800)`): The range of timesteps to skip in the spatial attention layer. The attention computations will be conditionally skipped if the current timestep is within the specified range. @@ -305,7 +305,7 @@ def _apply_pyramid_attention_broadcast_hook( block_skip_range (`int`): The number of times a specific attention broadcast is skipped before computing the attention states to re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times (i.e., old - attention states will be re-used) before computing the new attention states again. + attention states will be reused) before computing the new attention states again. current_timestep_callback (`Callable[[], int]`): A callback function that returns the current inference timestep. """ diff --git a/src/diffusers/modular_pipelines/flux/denoise.py b/src/diffusers/modular_pipelines/flux/denoise.py index ffb436abd4..ffa0a4456f 100644 --- a/src/diffusers/modular_pipelines/flux/denoise.py +++ b/src/diffusers/modular_pipelines/flux/denoise.py @@ -220,7 +220,7 @@ class FluxDenoiseStep(FluxDenoiseLoopWrapper): return ( "Denoise step that iteratively denoise the latents. \n" "Its loop logic is defined in `FluxDenoiseLoopWrapper.__call__` method \n" - "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `FluxLoopDenoiser`\n" " - `FluxLoopAfterDenoiser`\n" "This block supports both text2image and img2img tasks." diff --git a/src/diffusers/modular_pipelines/modular_pipeline.py b/src/diffusers/modular_pipelines/modular_pipeline.py index 3918679c16..c0524a1f86 100644 --- a/src/diffusers/modular_pipelines/modular_pipeline.py +++ b/src/diffusers/modular_pipelines/modular_pipeline.py @@ -229,7 +229,7 @@ class ModularPipelineBlocks(ConfigMixin, PushToHubMixin): Base class for all Pipeline Blocks: PipelineBlock, AutoPipelineBlocks, SequentialPipelineBlocks, LoopSequentialPipelineBlocks - [`ModularPipelineBlocks`] provides method to load and save the defination of pipeline blocks. + [`ModularPipelineBlocks`] provides method to load and save the definition of pipeline blocks. @@ -1418,7 +1418,7 @@ class LoopSequentialPipelineBlocks(ModularPipelineBlocks): # YiYi TODO: # 1. look into the serialization of modular_model_index.json, make sure the items are properly ordered like model_index.json (currently a mess) # 2. do we need ConfigSpec? the are basically just key/val kwargs -# 3. 
imnprove docstring and potentially add validator for methods where we accpet kwargs to be passed to from_pretrained/save_pretrained/load_components() +# 3. imnprove docstring and potentially add validator for methods where we accept kwargs to be passed to from_pretrained/save_pretrained/load_components() class ModularPipeline(ConfigMixin, PushToHubMixin): """ Base class for all Modular pipelines. diff --git a/src/diffusers/modular_pipelines/node_utils.py b/src/diffusers/modular_pipelines/node_utils.py index fb9a03c755..5db860c788 100644 --- a/src/diffusers/modular_pipelines/node_utils.py +++ b/src/diffusers/modular_pipelines/node_utils.py @@ -384,14 +384,14 @@ class ModularNode(ConfigMixin): # pass or create a default param dict for each input # e.g. for prompt, # prompt = { - # "name": "text_input", # the name of the input in node defination, could be different from the input name in diffusers + # "name": "text_input", # the name of the input in node definition, could be different from the input name in diffusers # "label": "Prompt", # "type": "string", # "default": "a bear sitting in a chair drinking a milkshake", # "display": "textarea"} # if type is not specified, it'll be a "custom" param of its own type # e.g. you can pass ModularNode(scheduler = {name :"scheduler"}) - # it will get this spec in node defination {"scheduler": {"label": "Scheduler", "type": "scheduler", "display": "input"}} + # it will get this spec in node definition {"scheduler": {"label": "Scheduler", "type": "scheduler", "display": "input"}} # name can be a dict, in that case, it is part of a "dict" input in mellon nodes, e.g. text_encoder= {name: {"text_encoders": "text_encoder"}} inputs = self.blocks.inputs + self.blocks.intermediate_inputs for inp in inputs: diff --git a/src/diffusers/modular_pipelines/stable_diffusion_xl/denoise.py b/src/diffusers/modular_pipelines/stable_diffusion_xl/denoise.py index 34e07dff8a..a2e1420595 100644 --- a/src/diffusers/modular_pipelines/stable_diffusion_xl/denoise.py +++ b/src/diffusers/modular_pipelines/stable_diffusion_xl/denoise.py @@ -695,7 +695,7 @@ class StableDiffusionXLDenoiseStep(StableDiffusionXLDenoiseLoopWrapper): return ( "Denoise step that iteratively denoise the latents. \n" "Its loop logic is defined in `StableDiffusionXLDenoiseLoopWrapper.__call__` method \n" - "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `StableDiffusionXLLoopBeforeDenoiser`\n" " - `StableDiffusionXLLoopDenoiser`\n" " - `StableDiffusionXLLoopAfterDenoiser`\n" @@ -717,7 +717,7 @@ class StableDiffusionXLControlNetDenoiseStep(StableDiffusionXLDenoiseLoopWrapper return ( "Denoise step that iteratively denoise the latents with controlnet. \n" "Its loop logic is defined in `StableDiffusionXLDenoiseLoopWrapper.__call__` method \n" - "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `StableDiffusionXLLoopBeforeDenoiser`\n" " - `StableDiffusionXLControlNetLoopDenoiser`\n" " - `StableDiffusionXLLoopAfterDenoiser`\n" @@ -739,7 +739,7 @@ class StableDiffusionXLInpaintDenoiseStep(StableDiffusionXLDenoiseLoopWrapper): return ( "Denoise step that iteratively denoise the latents(for inpainting task only). 
\n" "Its loop logic is defined in `StableDiffusionXLDenoiseLoopWrapper.__call__` method \n" - "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `StableDiffusionXLInpaintLoopBeforeDenoiser`\n" " - `StableDiffusionXLLoopDenoiser`\n" " - `StableDiffusionXLInpaintLoopAfterDenoiser`\n" @@ -761,7 +761,7 @@ class StableDiffusionXLInpaintControlNetDenoiseStep(StableDiffusionXLDenoiseLoop return ( "Denoise step that iteratively denoise the latents(for inpainting task only) with controlnet. \n" "Its loop logic is defined in `StableDiffusionXLDenoiseLoopWrapper.__call__` method \n" - "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `StableDiffusionXLInpaintLoopBeforeDenoiser`\n" " - `StableDiffusionXLControlNetLoopDenoiser`\n" " - `StableDiffusionXLInpaintLoopAfterDenoiser`\n" diff --git a/src/diffusers/modular_pipelines/wan/denoise.py b/src/diffusers/modular_pipelines/wan/denoise.py index 34297bcfb5..5f578609c2 100644 --- a/src/diffusers/modular_pipelines/wan/denoise.py +++ b/src/diffusers/modular_pipelines/wan/denoise.py @@ -253,7 +253,7 @@ class WanDenoiseStep(WanDenoiseLoopWrapper): return ( "Denoise step that iteratively denoise the latents. \n" "Its loop logic is defined in `WanDenoiseLoopWrapper.__call__` method \n" - "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `WanLoopDenoiser`\n" " - `WanLoopAfterDenoiser`\n" "This block supports both text2vid tasks." diff --git a/src/diffusers/pipelines/pipeline_loading_utils.py b/src/diffusers/pipelines/pipeline_loading_utils.py index 2c611aa2c0..ee767eddcc 100644 --- a/src/diffusers/pipelines/pipeline_loading_utils.py +++ b/src/diffusers/pipelines/pipeline_loading_utils.py @@ -613,7 +613,7 @@ def _assign_components_to_devices( def _get_final_device_map(device_map, pipeline_class, passed_class_obj, init_dict, library, max_memory, **kwargs): - # TODO: seperate out different device_map methods when it gets to it. + # TODO: separate out different device_map methods when it gets to it. if device_map != "balanced": return device_map # To avoid circular import problem. 
From c2e5ece08bf22d249c62e964f91bc326cf9e3759 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Thu, 4 Sep 2025 11:43:47 -0700 Subject: [PATCH 49/74] [docs] Sharing pipelines/models (#12280) init --- docs/source/en/_toctree.yml | 2 +- docs/source/en/using-diffusers/push_to_hub.md | 49 +++++++++++-------- 2 files changed, 29 insertions(+), 22 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index a97c82796f..32bca81b6a 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -29,7 +29,7 @@ - local: using-diffusers/other-formats title: Model files and layouts - local: using-diffusers/push_to_hub - title: Push files to the Hub + title: Sharing pipelines and models - title: Adapters isExpanded: false diff --git a/docs/source/en/using-diffusers/push_to_hub.md b/docs/source/en/using-diffusers/push_to_hub.md index c77ce27656..4319f620a9 100644 --- a/docs/source/en/using-diffusers/push_to_hub.md +++ b/docs/source/en/using-diffusers/push_to_hub.md @@ -10,19 +10,22 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Push files to the Hub - [[open-in-colab]] -🤗 Diffusers provides a [`~diffusers.utils.PushToHubMixin`] for uploading your model, scheduler, or pipeline to the Hub. It is an easy way to store your files on the Hub, and also allows you to share your work with others. Under the hood, the [`~diffusers.utils.PushToHubMixin`]: +# Sharing pipelines and models + +Share your pipeline or models and schedulers on the Hub with the [`~diffusers.utils.PushToHubMixin`] class. This class: 1. creates a repository on the Hub 2. saves your model, scheduler, or pipeline files so they can be reloaded later 3. uploads folder containing these files to the Hub -This guide will show you how to use the [`~diffusers.utils.PushToHubMixin`] to upload your files to the Hub. +This guide will show you how to upload your files to the Hub with the [`~diffusers.utils.PushToHubMixin`] class. -You'll need to log in to your Hub account with your access [token](https://huggingface.co/settings/tokens) first: +Log in to your Hugging Face account with your access [token](https://huggingface.co/settings/tokens). + + + ```py from huggingface_hub import notebook_login @@ -30,9 +33,19 @@ from huggingface_hub import notebook_login notebook_login() ``` + + + +```bash +hf auth login +``` + + + + ## Models -To push a model to the Hub, call [`~diffusers.utils.PushToHubMixin.push_to_hub`] and specify the repository id of the model to be stored on the Hub: +To push a model to the Hub, call [`~diffusers.utils.PushToHubMixin.push_to_hub`] and specify the repository id of the model. ```py from diffusers import ControlNetModel @@ -48,15 +61,9 @@ controlnet = ControlNetModel( controlnet.push_to_hub("my-controlnet-model") ``` -For models, you can also specify the [*variant*](loading#checkpoint-variants) of the weights to push to the Hub. For example, to push `fp16` weights: +The [`~diffusers.utils.PushToHubMixin.push_to_hub`] method saves the model's `config.json` file and the weights are automatically saved as safetensors files. -```py -controlnet.push_to_hub("my-controlnet-model", variant="fp16") -``` - -The [`~diffusers.utils.PushToHubMixin.push_to_hub`] function saves the model's `config.json` file and the weights are automatically saved in the `safetensors` format. 
- -Now you can reload the model from your repository on the Hub: +Load the model again with [`~DiffusionPipeline.from_pretrained`]. ```py model = ControlNetModel.from_pretrained("your-namespace/my-controlnet-model") @@ -64,7 +71,7 @@ model = ControlNetModel.from_pretrained("your-namespace/my-controlnet-model") ## Scheduler -To push a scheduler to the Hub, call [`~diffusers.utils.PushToHubMixin.push_to_hub`] and specify the repository id of the scheduler to be stored on the Hub: +To push a scheduler to the Hub, call [`~diffusers.utils.PushToHubMixin.push_to_hub`] and specify the repository id of the scheduler. ```py from diffusers import DDIMScheduler @@ -81,7 +88,7 @@ scheduler.push_to_hub("my-controlnet-scheduler") The [`~diffusers.utils.PushToHubMixin.push_to_hub`] function saves the scheduler's `scheduler_config.json` file to the specified repository. -Now you can reload the scheduler from your repository on the Hub: +Load the scheduler again with [`~SchedulerMixin.from_pretrained`]. ```py scheduler = DDIMScheduler.from_pretrained("your-namepsace/my-controlnet-scheduler") @@ -89,7 +96,7 @@ scheduler = DDIMScheduler.from_pretrained("your-namepsace/my-controlnet-schedule ## Pipeline -You can also push an entire pipeline with all it's components to the Hub. For example, initialize the components of a [`StableDiffusionPipeline`] with the parameters you want: +To push a pipeline to the Hub, initialize the pipeline components with your desired parameters. ```py from diffusers import ( @@ -143,7 +150,7 @@ text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") ``` -Pass all of the components to the [`StableDiffusionPipeline`] and call [`~diffusers.utils.PushToHubMixin.push_to_hub`] to push the pipeline to the Hub: +Pass all components to the pipeline and call [`~diffusers.utils.PushToHubMixin.push_to_hub`]. ```py components = { @@ -160,7 +167,7 @@ pipeline = StableDiffusionPipeline(**components) pipeline.push_to_hub("my-pipeline") ``` -The [`~diffusers.utils.PushToHubMixin.push_to_hub`] function saves each component to a subfolder in the repository. Now you can reload the pipeline from your repository on the Hub: +The [`~diffusers.utils.PushToHubMixin.push_to_hub`] method saves each component to a subfolder in the repository. Load the pipeline again with [`~DiffusionPipeline.from_pretrained`]. ```py pipeline = StableDiffusionPipeline.from_pretrained("your-namespace/my-pipeline") @@ -168,10 +175,10 @@ pipeline = StableDiffusionPipeline.from_pretrained("your-namespace/my-pipeline") ## Privacy -Set `private=True` in the [`~diffusers.utils.PushToHubMixin.push_to_hub`] function to keep your model, scheduler, or pipeline files private: +Set `private=True` in [`~diffusers.utils.PushToHubMixin.push_to_hub`] to keep a model, scheduler, or pipeline files private. ```py controlnet.push_to_hub("my-controlnet-model-private", private=True) ``` -Private repositories are only visible to you, and other users won't be able to clone the repository and your repository won't appear in search results. Even if a user has the URL to your private repository, they'll receive a `404 - Sorry, we can't find the page you are looking for`. You must be [logged in](https://huggingface.co/docs/huggingface_hub/quick-start#login) to load a model from a private repository. \ No newline at end of file +Private repositories are only visible to you. Other users won't be able to clone the repository and it won't appear in search results. 
Even if a user has the URL to your private repository, they'll receive a `404 - Sorry, we can't find the page you are looking for`. You must be [logged in](https://huggingface.co/docs/huggingface_hub/quick-start#login) to load a model from a private repository. \ No newline at end of file From 32798bf242a6b15e91a6fadc444f8806b4e8bb46 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Fri, 5 Sep 2025 09:34:37 -0700 Subject: [PATCH 50/74] [docs] Inference section cleanup (#12281) init Co-authored-by: Sayak Paul --- docs/source/en/_toctree.yml | 8 ++------ docs/source/en/using-diffusers/image_quality.md | 10 ++-------- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 32bca81b6a..b33989aed0 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -58,12 +58,6 @@ title: Batch inference - local: training/distributed_inference title: Distributed inference - - local: using-diffusers/scheduler_features - title: Scheduler features - - local: using-diffusers/callback - title: Pipeline callbacks - - local: using-diffusers/image_quality - title: Controlling image quality - title: Inference optimization isExpanded: false @@ -92,6 +86,8 @@ title: xDiT - local: optimization/para_attn title: ParaAttention + - local: using-diffusers/image_quality + title: FreeU - title: Hybrid Inference isExpanded: false diff --git a/docs/source/en/using-diffusers/image_quality.md b/docs/source/en/using-diffusers/image_quality.md index 517d985190..29ce483d5e 100644 --- a/docs/source/en/using-diffusers/image_quality.md +++ b/docs/source/en/using-diffusers/image_quality.md @@ -10,13 +10,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Controlling image quality - -The components of a diffusion model, like the UNet and scheduler, can be optimized to improve the quality of generated images leading to better details. These techniques are especially useful if you don't have the resources to simply use a larger model for inference. You can enable these techniques during inference without any additional training. - -This guide will show you how to turn these techniques on in your pipeline and how to configure them to improve the quality of your generated images. - -## Details +# FreeU [FreeU](https://hf.co/papers/2309.11497) improves image details by rebalancing the UNet's backbone and skip connection weights. The skip connections can cause the model to overlook some of the backbone semantics which may lead to unnatural image details in the generated image. This technique does not require any additional training and can be applied on the fly during inference for tasks like image-to-image and text-to-video. @@ -139,7 +133,7 @@ export_to_video(video_frames, "teddy_bear.mp4", fps=10)
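As a quick reference for this retitled page, enabling FreeU only requires setting the backbone (`b1`, `b2`) and skip connection (`s1`, `s2`) scaling factors on a loaded pipeline. The sketch below is illustrative rather than part of this patch: it assumes an SDXL checkpoint and uses commonly suggested SDXL values, which may need tuning for other models.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# rebalance the UNet backbone (b1, b2) and skip connection (s1, s2) contributions
pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
image = pipeline("an astronaut riding a horse on mars").images[0]
```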
-Call the [`pipelines.StableDiffusionMixin.disable_freeu`] method to disable FreeU. +Call the [`~pipelines.StableDiffusionMixin.disable_freeu`] method to disable FreeU. ```py pipeline.disable_freeu() From fc337d585309c4b032e8d0180bea683007219df1 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Fri, 5 Sep 2025 11:52:09 -0700 Subject: [PATCH 51/74] [docs] Models (#12248) * init * fix * feedback * feedback --- docs/source/en/_toctree.yml | 2 + docs/source/en/using-diffusers/loading.md | 39 ++----- docs/source/en/using-diffusers/models.md | 120 ++++++++++++++++++++++ 3 files changed, 128 insertions(+), 33 deletions(-) create mode 100644 docs/source/en/using-diffusers/models.md diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index b33989aed0..14dbfe3ea1 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -24,6 +24,8 @@ title: Reproducibility - local: using-diffusers/schedulers title: Load schedulers and models + - local: using-diffusers/models + title: Models - local: using-diffusers/scheduler_features title: Scheduler features - local: using-diffusers/other-formats diff --git a/docs/source/en/using-diffusers/loading.md b/docs/source/en/using-diffusers/loading.md index f86ea104cf..25b53d2f4d 100644 --- a/docs/source/en/using-diffusers/loading.md +++ b/docs/source/en/using-diffusers/loading.md @@ -108,23 +108,20 @@ print(pipeline.transformer.dtype, pipeline.vae.dtype) The `device_map` argument determines individual model or pipeline placement on an accelerator like a GPU. It is especially helpful when there are multiple GPUs. -Diffusers currently provides three options to `device_map`, `"cuda"`, `"balanced"` and `"auto"`. Refer to the table below to compare the three placement strategies. +A pipeline supports two options for `device_map`, `"cuda"` and `"balanced"`. Refer to the table below to compare the placement strategies. | parameter | description | |---|---| -| `"cuda"` | places model or pipeline on CUDA device | -| `"balanced"` | evenly distributes model or pipeline on all GPUs | -| `"auto"` | distribute model from fastest device first to slowest | +| `"cuda"` | places pipeline on a supported accelerator device like CUDA | +| `"balanced"` | evenly distributes pipeline on all GPUs | Use the `max_memory` argument in [`~DiffusionPipeline.from_pretrained`] to allocate a maximum amount of memory to use on each device. By default, Diffusers uses the maximum amount available. - - - ```py import torch from diffusers import DiffusionPipeline +max_memory = {0: "16GB", 1: "16GB"} pipeline = DiffusionPipeline.from_pretrained( "Qwen/Qwen-Image", torch_dtype=torch.bfloat16, @@ -132,26 +129,6 @@ pipeline = DiffusionPipeline.from_pretrained( ) ``` - - - -```py -import torch -from diffusers import AutoModel - -max_memory = {0: "16GB", 1: "16GB"} -transformer = AutoModel.from_pretrained( - "Qwen/Qwen-Image", - subfolder="transformer", - torch_dtype=torch.bfloat16 - device_map="cuda", - max_memory=max_memory -) -``` - - - - The `hf_device_map` attribute allows you to access and view the `device_map`. ```py @@ -189,22 +166,18 @@ pipeline = DiffusionPipeline.from_pretrained( [`DiffusionPipeline`] is flexible and accommodates loading different models or schedulers. You can experiment with different schedulers to optimize for generation speed or quality, and you can replace models with more performant ones. -The example below swaps the default scheduler to generate higher quality images and a more stable VAE version. 
Pass the `subfolder` argument in [`~HeunDiscreteScheduler.from_pretrained`] to load the scheduler to the correct subfolder. +The example below uses a more stable VAE version. ```py import torch -from diffusers import DiffusionPipeline, HeunDiscreteScheduler, AutoModel +from diffusers import DiffusionPipeline, AutoModel -scheduler = HeunDiscreteScheduler.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler" -) vae = AutoModel.from_pretrained( "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16 ) pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", - scheduler=scheduler, vae=vae, torch_dtype=torch.float16, device_map="cuda" diff --git a/docs/source/en/using-diffusers/models.md b/docs/source/en/using-diffusers/models.md new file mode 100644 index 0000000000..22c78d490a --- /dev/null +++ b/docs/source/en/using-diffusers/models.md @@ -0,0 +1,120 @@ + + +[[open-in-colab]] + +# Models + +A diffusion model relies on a few individual models working together to generate an output. These models are responsible for denoising, encoding inputs, and decoding latents into the actual outputs. + +This guide will show you how to load models. + +## Loading a model + +All models are loaded with the [`~ModelMixin.from_pretrained`] method, which downloads and caches the latest model version. If the latest files are available in the local cache, [`~ModelMixin.from_pretrained`] reuses files in the cache. + +Pass the `subfolder` argument to [`~ModelMixin.from_pretrained`] to specify where to load the model weights from. Omit the `subfolder` argument if the repository doesn't have a subfolder structure or if you're loading a standalone model. + +```py +from diffusers import QwenImageTransformer2DModel + +model = QwenImageTransformer2DModel.from_pretrained("Qwen/Qwen-Image", subfolder="transformer") +``` + +## AutoModel + +[`AutoModel`] detects the model class from a `model_index.json` file or a model's `config.json` file. It fetches the correct model class from these files and delegates the actual loading to the model class. [`AutoModel`] is useful for automatic model type detection without needing to know the exact model class beforehand. + +```py +from diffusers import AutoModel + +model = AutoModel.from_pretrained( + "Qwen/Qwen-Image", subfolder="transformer" +) +``` + +## Model data types + +Use the `torch_dtype` argument in [`~ModelMixin.from_pretrained`] to load a model with a specific data type. This allows you to load a model in a lower precision to reduce memory usage. + +```py +import torch +from diffusers import QwenImageTransformer2DModel + +model = QwenImageTransformer2DModel.from_pretrained( + "Qwen/Qwen-Image", + subfolder="transformer", + torch_dtype=torch.bfloat16 +) +``` + +[nn.Module.to](https://docs.pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.to) can also convert to a specific data type on the fly. However, it converts *all* weights to the requested data type unlike `torch_dtype` which respects `_keep_in_fp32_modules`. 
This argument preserves layers in `torch.float32` for numerical stability and best generation quality (see example [_keep_in_fp32_modules](https://github.com/huggingface/diffusers/blob/f864a9a352fa4a220d860bfdd1782e3e5af96382/src/diffusers/models/transformers/transformer_wan.py#L374)) + +```py +from diffusers import QwenImageTransformer2DModel + +model = QwenImageTransformer2DModel.from_pretrained( + "Qwen/Qwen-Image", subfolder="transformer" +) +model = model.to(dtype=torch.float16) +``` + +## Device placement + +Use the `device_map` argument in [`~ModelMixin.from_pretrained`] to place a model on an accelerator like a GPU. It is especially helpful where there are multiple GPUs. + +Diffusers currently provides three options to `device_map` for individual models, `"cuda"`, `"balanced"` and `"auto"`. Refer to the table below to compare the three placement strategies. + +| parameter | description | +|---|---| +| `"cuda"` | places pipeline on a supported accelerator (CUDA) | +| `"balanced"` | evenly distributes pipeline on all GPUs | +| `"auto"` | distribute model from fastest device first to slowest | + +Use the `max_memory` argument in [`~ModelMixin.from_pretrained`] to allocate a maximum amount of memory to use on each device. By default, Diffusers uses the maximum amount available. + +```py +import torch +from diffusers import QwenImagePipeline + +max_memory = {0: "16GB", 1: "16GB"} +pipeline = QwenImagePipeline.from_pretrained( + "Qwen/Qwen-Image", + torch_dtype=torch.bfloat16, + device_map="cuda", + max_memory=max_memory +) +``` + +The `hf_device_map` attribute allows you to access and view the `device_map`. + +```py +print(transformer.hf_device_map) +# {'': device(type='cuda')} +``` + +## Saving models + +Save a model with the [`~ModelMixin.save_pretrained`] method. + +```py +from diffusers import QwenImageTransformer2DModel + +model = QwenImageTransformer2DModel.from_pretrained("Qwen/Qwen-Image", subfolder="transformer") +model.save_pretrained("./local/model") +``` + +For large models, it is helpful to use `max_shard_size` to save a model as multiple shards. A shard can be loaded faster and save memory (refer to the [parallel loading](./loading#parallel-loading) docs for more details), especially if there is more than one GPU. 
+ +```py +model.save_pretrained("./local/model", max_shard_size="5GB") +``` From f50b18eec7d646bf98aef576dbb0f47ff512beaa Mon Sep 17 00:00:00 2001 From: YiYi Xu Date: Mon, 8 Sep 2025 00:27:02 -1000 Subject: [PATCH 52/74] [Modular] Qwen (#12220) * add qwen modular --- docs/source/en/api/image_processor.md | 6 + src/diffusers/__init__.py | 8 + src/diffusers/hooks/_helpers.py | 10 + src/diffusers/image_processor.py | 132 +++ src/diffusers/modular_pipelines/__init__.py | 12 + .../modular_pipelines/modular_pipeline.py | 37 +- .../modular_pipelines/qwenimage/__init__.py | 75 ++ .../qwenimage/before_denoise.py | 727 +++++++++++++++ .../modular_pipelines/qwenimage/decoders.py | 203 +++++ .../modular_pipelines/qwenimage/denoise.py | 668 ++++++++++++++ .../modular_pipelines/qwenimage/encoders.py | 857 ++++++++++++++++++ .../modular_pipelines/qwenimage/inputs.py | 431 +++++++++ .../qwenimage/modular_blocks.py | 841 +++++++++++++++++ .../qwenimage/modular_pipeline.py | 202 +++++ .../stable_diffusion_xl/modular_pipeline.py | 1 + src/diffusers/pipelines/auto_pipeline.py | 14 + .../dummy_torch_and_transformers_objects.py | 60 ++ 17 files changed, 4275 insertions(+), 9 deletions(-) create mode 100644 src/diffusers/modular_pipelines/qwenimage/__init__.py create mode 100644 src/diffusers/modular_pipelines/qwenimage/before_denoise.py create mode 100644 src/diffusers/modular_pipelines/qwenimage/decoders.py create mode 100644 src/diffusers/modular_pipelines/qwenimage/denoise.py create mode 100644 src/diffusers/modular_pipelines/qwenimage/encoders.py create mode 100644 src/diffusers/modular_pipelines/qwenimage/inputs.py create mode 100644 src/diffusers/modular_pipelines/qwenimage/modular_blocks.py create mode 100644 src/diffusers/modular_pipelines/qwenimage/modular_pipeline.py diff --git a/docs/source/en/api/image_processor.md b/docs/source/en/api/image_processor.md index 3e75af026d..82d1837b0b 100644 --- a/docs/source/en/api/image_processor.md +++ b/docs/source/en/api/image_processor.md @@ -20,6 +20,12 @@ All pipelines with [`VaeImageProcessor`] accept PIL Image, PyTorch tensor, or Nu [[autodoc]] image_processor.VaeImageProcessor +## InpaintProcessor + +The [`InpaintProcessor`] accepts `mask` and `image` inputs and process them together. Optionally, it can accept padding_mask_crop and apply mask overlay. + +[[autodoc]] image_processor.InpaintProcessor + ## VaeImageProcessorLDM3D The [`VaeImageProcessorLDM3D`] accepts RGB and depth inputs and returns RGB and depth outputs. 
diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index fa5dd6482c..4c06440172 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -385,6 +385,10 @@ else: [ "FluxAutoBlocks", "FluxModularPipeline", + "QwenImageAutoBlocks", + "QwenImageEditAutoBlocks", + "QwenImageEditModularPipeline", + "QwenImageModularPipeline", "StableDiffusionXLAutoBlocks", "StableDiffusionXLModularPipeline", "WanAutoBlocks", @@ -1038,6 +1042,10 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .modular_pipelines import ( FluxAutoBlocks, FluxModularPipeline, + QwenImageAutoBlocks, + QwenImageEditAutoBlocks, + QwenImageEditModularPipeline, + QwenImageModularPipeline, StableDiffusionXLAutoBlocks, StableDiffusionXLModularPipeline, WanAutoBlocks, diff --git a/src/diffusers/hooks/_helpers.py b/src/diffusers/hooks/_helpers.py index b7a74be2e5..f6e5bdd52d 100644 --- a/src/diffusers/hooks/_helpers.py +++ b/src/diffusers/hooks/_helpers.py @@ -108,6 +108,7 @@ def _register_attention_processors_metadata(): from ..models.attention_processor import AttnProcessor2_0 from ..models.transformers.transformer_cogview4 import CogView4AttnProcessor from ..models.transformers.transformer_flux import FluxAttnProcessor + from ..models.transformers.transformer_qwenimage import QwenDoubleStreamAttnProcessor2_0 from ..models.transformers.transformer_wan import WanAttnProcessor2_0 # AttnProcessor2_0 @@ -140,6 +141,14 @@ def _register_attention_processors_metadata(): metadata=AttentionProcessorMetadata(skip_processor_output_fn=_skip_proc_output_fn_Attention_FluxAttnProcessor), ) + # QwenDoubleStreamAttnProcessor2 + AttentionProcessorRegistry.register( + model_class=QwenDoubleStreamAttnProcessor2_0, + metadata=AttentionProcessorMetadata( + skip_processor_output_fn=_skip_proc_output_fn_Attention_QwenDoubleStreamAttnProcessor2_0 + ), + ) + def _register_transformer_blocks_metadata(): from ..models.attention import BasicTransformerBlock @@ -298,4 +307,5 @@ _skip_proc_output_fn_Attention_CogView4AttnProcessor = _skip_attention___ret___h _skip_proc_output_fn_Attention_WanAttnProcessor2_0 = _skip_attention___ret___hidden_states # not sure what this is yet. _skip_proc_output_fn_Attention_FluxAttnProcessor = _skip_attention___ret___hidden_states +_skip_proc_output_fn_Attention_QwenDoubleStreamAttnProcessor2_0 = _skip_attention___ret___hidden_states # fmt: on diff --git a/src/diffusers/image_processor.py b/src/diffusers/image_processor.py index 6a3cf77a7d..0e3082eada 100644 --- a/src/diffusers/image_processor.py +++ b/src/diffusers/image_processor.py @@ -523,6 +523,7 @@ class VaeImageProcessor(ConfigMixin): size=(height, width), ) image = self.pt_to_numpy(image) + return image def binarize(self, image: PIL.Image.Image) -> PIL.Image.Image: @@ -838,6 +839,137 @@ class VaeImageProcessor(ConfigMixin): return image +class InpaintProcessor(ConfigMixin): + """ + Image processor for inpainting image and mask. 
+ """ + + config_name = CONFIG_NAME + + @register_to_config + def __init__( + self, + do_resize: bool = True, + vae_scale_factor: int = 8, + vae_latent_channels: int = 4, + resample: str = "lanczos", + reducing_gap: int = None, + do_normalize: bool = True, + do_binarize: bool = False, + do_convert_grayscale: bool = False, + mask_do_normalize: bool = False, + mask_do_binarize: bool = True, + mask_do_convert_grayscale: bool = True, + ): + super().__init__() + + self._image_processor = VaeImageProcessor( + do_resize=do_resize, + vae_scale_factor=vae_scale_factor, + vae_latent_channels=vae_latent_channels, + resample=resample, + reducing_gap=reducing_gap, + do_normalize=do_normalize, + do_binarize=do_binarize, + do_convert_grayscale=do_convert_grayscale, + ) + self._mask_processor = VaeImageProcessor( + do_resize=do_resize, + vae_scale_factor=vae_scale_factor, + vae_latent_channels=vae_latent_channels, + resample=resample, + reducing_gap=reducing_gap, + do_normalize=mask_do_normalize, + do_binarize=mask_do_binarize, + do_convert_grayscale=mask_do_convert_grayscale, + ) + + def preprocess( + self, + image: PIL.Image.Image, + mask: PIL.Image.Image = None, + height: int = None, + width: int = None, + padding_mask_crop: Optional[int] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Preprocess the image and mask. + """ + if mask is None and padding_mask_crop is not None: + raise ValueError("mask must be provided if padding_mask_crop is provided") + + # if mask is None, same behavior as regular image processor + if mask is None: + return self._image_processor.preprocess(image, height=height, width=width) + + if padding_mask_crop is not None: + crops_coords = self._image_processor.get_crop_region(mask, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + processed_image = self._image_processor.preprocess( + image, + height=height, + width=width, + crops_coords=crops_coords, + resize_mode=resize_mode, + ) + + processed_mask = self._mask_processor.preprocess( + mask, + height=height, + width=width, + resize_mode=resize_mode, + crops_coords=crops_coords, + ) + + if crops_coords is not None: + postprocessing_kwargs = { + "crops_coords": crops_coords, + "original_image": image, + "original_mask": mask, + } + else: + postprocessing_kwargs = { + "crops_coords": None, + "original_image": None, + "original_mask": None, + } + + return processed_image, processed_mask, postprocessing_kwargs + + def postprocess( + self, + image: torch.Tensor, + output_type: str = "pil", + original_image: Optional[PIL.Image.Image] = None, + original_mask: Optional[PIL.Image.Image] = None, + crops_coords: Optional[Tuple[int, int, int, int]] = None, + ) -> Tuple[PIL.Image.Image, PIL.Image.Image]: + """ + Postprocess the image, optionally apply mask overlay + """ + image = self._image_processor.postprocess( + image, + output_type=output_type, + ) + # optionally apply the mask overlay + if crops_coords is not None and (original_image is None or original_mask is None): + raise ValueError("original_image and original_mask must be provided if crops_coords is provided") + + elif crops_coords is not None and output_type != "pil": + raise ValueError("output_type must be 'pil' if crops_coords is provided") + + elif crops_coords is not None: + image = [ + self._image_processor.apply_overlay(original_mask, original_image, i, crops_coords) for i in image + ] + + return image + + class VaeImageProcessorLDM3D(VaeImageProcessor): """ Image processor for VAE LDM3D. 
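For reviewers, here is a minimal usage sketch of the new `InpaintProcessor`, pieced together from the signatures added above rather than taken from this patch. The file names are placeholders, and the random tensor stands in for a real VAE-decoded output that an inpainting pipeline would produce from the denoised latents.

```py
import torch
from PIL import Image

from diffusers.image_processor import InpaintProcessor

processor = InpaintProcessor(vae_scale_factor=8)

# placeholder inputs: any RGB image plus a single-channel mask of the same size
image = Image.open("input.png").convert("RGB")
mask = Image.open("mask.png").convert("L")

# with padding_mask_crop set, preprocess crops around the masked region and returns
# the crop coordinates (plus originals) needed to paste the result back afterwards
processed_image, processed_mask, postprocess_kwargs = processor.preprocess(
    image, mask, height=1024, width=1024, padding_mask_crop=32
)

# ... run the inpainting denoiser and VAE decode here; `decoded` is a stand-in in [-1, 1]
decoded = torch.rand(1, 3, 1024, 1024) * 2 - 1

# converts to PIL and, because crops_coords is set, applies the mask overlay
result = processor.postprocess(decoded, output_type="pil", **postprocess_kwargs)[0]
```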
diff --git a/src/diffusers/modular_pipelines/__init__.py b/src/diffusers/modular_pipelines/__init__.py index 68d707f9e0..65c22b349b 100644 --- a/src/diffusers/modular_pipelines/__init__.py +++ b/src/diffusers/modular_pipelines/__init__.py @@ -47,6 +47,12 @@ else: _import_structure["stable_diffusion_xl"] = ["StableDiffusionXLAutoBlocks", "StableDiffusionXLModularPipeline"] _import_structure["wan"] = ["WanAutoBlocks", "WanModularPipeline"] _import_structure["flux"] = ["FluxAutoBlocks", "FluxModularPipeline"] + _import_structure["qwenimage"] = [ + "QwenImageAutoBlocks", + "QwenImageModularPipeline", + "QwenImageEditModularPipeline", + "QwenImageEditAutoBlocks", + ] _import_structure["components_manager"] = ["ComponentsManager"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: @@ -68,6 +74,12 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: SequentialPipelineBlocks, ) from .modular_pipeline_utils import ComponentSpec, ConfigSpec, InputParam, InsertableDict, OutputParam + from .qwenimage import ( + QwenImageAutoBlocks, + QwenImageEditAutoBlocks, + QwenImageEditModularPipeline, + QwenImageModularPipeline, + ) from .stable_diffusion_xl import StableDiffusionXLAutoBlocks, StableDiffusionXLModularPipeline from .wan import WanAutoBlocks, WanModularPipeline else: diff --git a/src/diffusers/modular_pipelines/modular_pipeline.py b/src/diffusers/modular_pipelines/modular_pipeline.py index c0524a1f86..78226a49b1 100644 --- a/src/diffusers/modular_pipelines/modular_pipeline.py +++ b/src/diffusers/modular_pipelines/modular_pipeline.py @@ -56,6 +56,8 @@ MODULAR_PIPELINE_MAPPING = OrderedDict( ("stable-diffusion-xl", "StableDiffusionXLModularPipeline"), ("wan", "WanModularPipeline"), ("flux", "FluxModularPipeline"), + ("qwenimage", "QwenImageModularPipeline"), + ("qwenimage-edit", "QwenImageEditModularPipeline"), ] ) @@ -64,6 +66,8 @@ MODULAR_PIPELINE_BLOCKS_MAPPING = OrderedDict( ("StableDiffusionXLModularPipeline", "StableDiffusionXLAutoBlocks"), ("WanModularPipeline", "WanAutoBlocks"), ("FluxModularPipeline", "FluxAutoBlocks"), + ("QwenImageModularPipeline", "QwenImageAutoBlocks"), + ("QwenImageEditModularPipeline", "QwenImageEditAutoBlocks"), ] ) @@ -133,8 +137,8 @@ class PipelineState: Allow attribute access to intermediate values. If an attribute is not found in the object, look for it in the intermediates dict. """ - if name in self.intermediates: - return self.intermediates[name] + if name in self.values: + return self.values[name] raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") def __repr__(self): @@ -548,8 +552,11 @@ class AutoPipelineBlocks(ModularPipelineBlocks): def __init__(self): sub_blocks = InsertableDict() - for block_name, block_cls in zip(self.block_names, self.block_classes): - sub_blocks[block_name] = block_cls() + for block_name, block in zip(self.block_names, self.block_classes): + if inspect.isclass(block): + sub_blocks[block_name] = block() + else: + sub_blocks[block_name] = block self.sub_blocks = sub_blocks if not (len(self.block_classes) == len(self.block_names) == len(self.block_trigger_inputs)): raise ValueError( @@ -830,7 +837,9 @@ class SequentialPipelineBlocks(ModularPipelineBlocks): return expected_configs @classmethod - def from_blocks_dict(cls, blocks_dict: Dict[str, Any]) -> "SequentialPipelineBlocks": + def from_blocks_dict( + cls, blocks_dict: Dict[str, Any], description: Optional[str] = None + ) -> "SequentialPipelineBlocks": """Creates a SequentialPipelineBlocks instance from a dictionary of blocks. 
Args: @@ -852,12 +861,19 @@ class SequentialPipelineBlocks(ModularPipelineBlocks): instance.block_classes = [block.__class__ for block in sub_blocks.values()] instance.block_names = list(sub_blocks.keys()) instance.sub_blocks = sub_blocks + + if description is not None: + instance.description = description + return instance def __init__(self): sub_blocks = InsertableDict() - for block_name, block_cls in zip(self.block_names, self.block_classes): - sub_blocks[block_name] = block_cls() + for block_name, block in zip(self.block_names, self.block_classes): + if inspect.isclass(block): + sub_blocks[block_name] = block() + else: + sub_blocks[block_name] = block self.sub_blocks = sub_blocks def _get_inputs(self): @@ -1280,8 +1296,11 @@ class LoopSequentialPipelineBlocks(ModularPipelineBlocks): def __init__(self): sub_blocks = InsertableDict() - for block_name, block_cls in zip(self.block_names, self.block_classes): - sub_blocks[block_name] = block_cls() + for block_name, block in zip(self.block_names, self.block_classes): + if inspect.isclass(block): + sub_blocks[block_name] = block() + else: + sub_blocks[block_name] = block self.sub_blocks = sub_blocks @classmethod diff --git a/src/diffusers/modular_pipelines/qwenimage/__init__.py b/src/diffusers/modular_pipelines/qwenimage/__init__.py new file mode 100644 index 0000000000..81cf515730 --- /dev/null +++ b/src/diffusers/modular_pipelines/qwenimage/__init__.py @@ -0,0 +1,75 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["encoders"] = ["QwenImageTextEncoderStep"] + _import_structure["modular_blocks"] = [ + "ALL_BLOCKS", + "AUTO_BLOCKS", + "CONTROLNET_BLOCKS", + "EDIT_AUTO_BLOCKS", + "EDIT_BLOCKS", + "EDIT_INPAINT_BLOCKS", + "IMAGE2IMAGE_BLOCKS", + "INPAINT_BLOCKS", + "TEXT2IMAGE_BLOCKS", + "QwenImageAutoBlocks", + "QwenImageEditAutoBlocks", + ] + _import_structure["modular_pipeline"] = ["QwenImageEditModularPipeline", "QwenImageModularPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .encoders import ( + QwenImageTextEncoderStep, + ) + from .modular_blocks import ( + ALL_BLOCKS, + AUTO_BLOCKS, + CONTROLNET_BLOCKS, + EDIT_AUTO_BLOCKS, + EDIT_BLOCKS, + EDIT_INPAINT_BLOCKS, + IMAGE2IMAGE_BLOCKS, + INPAINT_BLOCKS, + TEXT2IMAGE_BLOCKS, + QwenImageAutoBlocks, + QwenImageEditAutoBlocks, + ) + from .modular_pipeline import QwenImageEditModularPipeline, QwenImageModularPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/src/diffusers/modular_pipelines/qwenimage/before_denoise.py b/src/diffusers/modular_pipelines/qwenimage/before_denoise.py new file mode 
100644 index 0000000000..738a1e5d15 --- /dev/null +++ b/src/diffusers/modular_pipelines/qwenimage/before_denoise.py @@ -0,0 +1,727 @@ +# Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ...models import QwenImageControlNetModel, QwenImageMultiControlNetModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils.torch_utils import randn_tensor, unwrap_module +from ..modular_pipeline import ModularPipelineBlocks, PipelineState +from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam +from .modular_pipeline import QwenImageModularPipeline, QwenImagePachifier + + +# Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.15, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + r""" + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +# modified from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps +def get_timesteps(scheduler, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(num_inference_steps * strength, num_inference_steps) + + t_start = int(max(num_inference_steps - init_timestep, 0)) + timesteps = scheduler.timesteps[t_start * scheduler.order :] + if hasattr(scheduler, "set_begin_index"): + scheduler.set_begin_index(t_start * scheduler.order) + + return timesteps, num_inference_steps - t_start + + +# Prepare Latents steps + + +class QwenImagePrepareLatentsStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Prepare initial random noise for the generation process" + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("pachifier", QwenImagePachifier, default_creation_method="from_config"), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam(name="height"), + InputParam(name="width"), + InputParam(name="num_images_per_prompt", default=1), + InputParam(name="generator"), + InputParam( + name="batch_size", + required=True, + type_hint=int, + description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. 
Can be generated in input step.", + ), + InputParam( + name="dtype", + required=True, + type_hint=torch.dtype, + description="The dtype of the model inputs, can be generated in input step.", + ), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + name="latents", + type_hint=torch.Tensor, + description="The initial latents to use for the denoising process", + ), + ] + + @staticmethod + def check_inputs(height, width, vae_scale_factor): + if height is not None and height % (vae_scale_factor * 2) != 0: + raise ValueError(f"Height must be divisible by {vae_scale_factor * 2} but is {height}") + + if width is not None and width % (vae_scale_factor * 2) != 0: + raise ValueError(f"Width must be divisible by {vae_scale_factor * 2} but is {width}") + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + self.check_inputs( + height=block_state.height, + width=block_state.width, + vae_scale_factor=components.vae_scale_factor, + ) + + device = components._execution_device + batch_size = block_state.batch_size * block_state.num_images_per_prompt + + # we can update the height and width here since it's used to generate the initial + block_state.height = block_state.height or components.default_height + block_state.width = block_state.width or components.default_width + + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. + latent_height = 2 * (int(block_state.height) // (components.vae_scale_factor * 2)) + latent_width = 2 * (int(block_state.width) // (components.vae_scale_factor * 2)) + + shape = (batch_size, components.num_channels_latents, 1, latent_height, latent_width) + if isinstance(block_state.generator, list) and len(block_state.generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(block_state.generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + block_state.latents = randn_tensor( + shape, generator=block_state.generator, device=device, dtype=block_state.dtype + ) + block_state.latents = components.pachifier.pack_latents(block_state.latents) + + self.set_block_state(state, block_state) + + return components, state + + +class QwenImagePrepareLatentsWithStrengthStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Step that adds noise to image latents for image-to-image/inpainting. Should be run after set_timesteps, prepare_latents. Both noise and image latents should alreadybe patchified." + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam( + name="latents", + required=True, + type_hint=torch.Tensor, + description="The initial random noised, can be generated in prepare latent step.", + ), + InputParam( + name="image_latents", + required=True, + type_hint=torch.Tensor, + description="The image latents to use for the denoising process. Can be generated in vae encoder and packed in input step.", + ), + InputParam( + name="timesteps", + required=True, + type_hint=torch.Tensor, + description="The timesteps to use for the denoising process. 
Can be generated in set_timesteps step.", + ), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + name="initial_noise", + type_hint=torch.Tensor, + description="The initial random noised used for inpainting denoising.", + ), + ] + + @staticmethod + def check_inputs(image_latents, latents): + if image_latents.shape[0] != latents.shape[0]: + raise ValueError( + f"`image_latents` must have have same batch size as `latents`, but got {image_latents.shape[0]} and {latents.shape[0]}" + ) + + if image_latents.ndim != 3: + raise ValueError(f"`image_latents` must have 3 dimensions (patchified), but got {image_latents.ndim}") + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + self.check_inputs( + image_latents=block_state.image_latents, + latents=block_state.latents, + ) + + # prepare latent timestep + latent_timestep = block_state.timesteps[:1].repeat(block_state.latents.shape[0]) + + # make copy of initial_noise + block_state.initial_noise = block_state.latents + + # scale noise + block_state.latents = components.scheduler.scale_noise( + block_state.image_latents, latent_timestep, block_state.latents + ) + + self.set_block_state(state, block_state) + + return components, state + + +class QwenImageCreateMaskLatentsStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Step that creates mask latents from preprocessed mask_image by interpolating to latent space." + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("pachifier", QwenImagePachifier, default_creation_method="from_config"), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam( + name="processed_mask_image", + required=True, + type_hint=torch.Tensor, + description="The processed mask to use for the inpainting process.", + ), + InputParam(name="height", required=True), + InputParam(name="width", required=True), + InputParam(name="dtype", required=True), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + name="mask", type_hint=torch.Tensor, description="The mask to use for the inpainting process." + ), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + device = components._execution_device + + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. 
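+        # For example (illustrative numbers only): height = width = 1024 with vae_scale_factor = 8
+        # gives height_latents = width_latents = 2 * (1024 // 16) = 128, i.e. the 8x VAE compression
+        # while keeping both dimensions divisible by 2 for packing.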
+ + height_latents = 2 * (int(block_state.height) // (components.vae_scale_factor * 2)) + width_latents = 2 * (int(block_state.width) // (components.vae_scale_factor * 2)) + + block_state.mask = torch.nn.functional.interpolate( + block_state.processed_mask_image, + size=(height_latents, width_latents), + ) + + block_state.mask = block_state.mask.unsqueeze(2) + block_state.mask = block_state.mask.repeat(1, components.num_channels_latents, 1, 1, 1) + block_state.mask = block_state.mask.to(device=device, dtype=block_state.dtype) + + block_state.mask = components.pachifier.pack_latents(block_state.mask) + + self.set_block_state(state, block_state) + + return components, state + + +# Set Timesteps steps + + +class QwenImageSetTimestepsStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Step that sets the the scheduler's timesteps for text-to-image generation. Should be run after prepare latents step." + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam(name="num_inference_steps", default=50), + InputParam(name="sigmas"), + InputParam( + name="latents", + required=True, + type_hint=torch.Tensor, + description="The latents to use for the denoising process, used to calculate the image sequence length.", + ), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + name="timesteps", type_hint=torch.Tensor, description="The timesteps to use for the denoising process" + ), + ] + + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + device = components._execution_device + sigmas = ( + np.linspace(1.0, 1 / block_state.num_inference_steps, block_state.num_inference_steps) + if block_state.sigmas is None + else block_state.sigmas + ) + + mu = calculate_shift( + image_seq_len=block_state.latents.shape[1], + base_seq_len=components.scheduler.config.get("base_image_seq_len", 256), + max_seq_len=components.scheduler.config.get("max_image_seq_len", 4096), + base_shift=components.scheduler.config.get("base_shift", 0.5), + max_shift=components.scheduler.config.get("max_shift", 1.15), + ) + block_state.timesteps, block_state.num_inference_steps = retrieve_timesteps( + scheduler=components.scheduler, + num_inference_steps=block_state.num_inference_steps, + device=device, + sigmas=sigmas, + mu=mu, + ) + + components.scheduler.set_begin_index(0) + + self.set_block_state(state, block_state) + + return components, state + + +class QwenImageSetTimestepsWithStrengthStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Step that sets the the scheduler's timesteps for image-to-image generation, and inpainting. Should be run after prepare latents step." 
+ + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam(name="num_inference_steps", default=50), + InputParam(name="sigmas"), + InputParam( + name="latents", + required=True, + type_hint=torch.Tensor, + description="The latents to use for the denoising process, used to calculate the image sequence length.", + ), + InputParam(name="strength", default=0.9), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + name="timesteps", + type_hint=torch.Tensor, + description="The timesteps to use for the denoising process. Can be generated in set_timesteps step.", + ), + ] + + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + device = components._execution_device + sigmas = ( + np.linspace(1.0, 1 / block_state.num_inference_steps, block_state.num_inference_steps) + if block_state.sigmas is None + else block_state.sigmas + ) + + mu = calculate_shift( + image_seq_len=block_state.latents.shape[1], + base_seq_len=components.scheduler.config.get("base_image_seq_len", 256), + max_seq_len=components.scheduler.config.get("max_image_seq_len", 4096), + base_shift=components.scheduler.config.get("base_shift", 0.5), + max_shift=components.scheduler.config.get("max_shift", 1.15), + ) + block_state.timesteps, block_state.num_inference_steps = retrieve_timesteps( + scheduler=components.scheduler, + num_inference_steps=block_state.num_inference_steps, + device=device, + sigmas=sigmas, + mu=mu, + ) + + block_state.timesteps, block_state.num_inference_steps = get_timesteps( + scheduler=components.scheduler, + num_inference_steps=block_state.num_inference_steps, + strength=block_state.strength, + ) + + self.set_block_state(state, block_state) + + return components, state + + +# other inputs for denoiser + +## RoPE inputs for denoiser + + +class QwenImageRoPEInputsStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return ( + "Step that prepares the RoPE inputs for the denoising process. 
Should be place after prepare_latents step" + ) + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam(name="batch_size", required=True), + InputParam(name="height", required=True), + InputParam(name="width", required=True), + InputParam(name="prompt_embeds_mask"), + InputParam(name="negative_prompt_embeds_mask"), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + name="img_shapes", + type_hint=List[List[Tuple[int, int, int]]], + description="The shapes of the images latents, used for RoPE calculation", + ), + OutputParam( + name="txt_seq_lens", + kwargs_type="denoiser_input_fields", + type_hint=List[int], + description="The sequence lengths of the prompt embeds, used for RoPE calculation", + ), + OutputParam( + name="negative_txt_seq_lens", + kwargs_type="denoiser_input_fields", + type_hint=List[int], + description="The sequence lengths of the negative prompt embeds, used for RoPE calculation", + ), + ] + + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + block_state.img_shapes = [ + [ + ( + 1, + block_state.height // components.vae_scale_factor // 2, + block_state.width // components.vae_scale_factor // 2, + ) + ] + * block_state.batch_size + ] + block_state.txt_seq_lens = ( + block_state.prompt_embeds_mask.sum(dim=1).tolist() if block_state.prompt_embeds_mask is not None else None + ) + block_state.negative_txt_seq_lens = ( + block_state.negative_prompt_embeds_mask.sum(dim=1).tolist() + if block_state.negative_prompt_embeds_mask is not None + else None + ) + + self.set_block_state(state, block_state) + + return components, state + + +class QwenImageEditRoPEInputsStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Step that prepares the RoPE inputs for denoising process. This is used in QwenImage Edit. 
Should be place after prepare_latents step" + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam(name="batch_size", required=True), + InputParam( + name="resized_image", required=True, type_hint=torch.Tensor, description="The resized image input" + ), + InputParam(name="height", required=True), + InputParam(name="width", required=True), + InputParam(name="prompt_embeds_mask"), + InputParam(name="negative_prompt_embeds_mask"), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + name="img_shapes", + type_hint=List[List[Tuple[int, int, int]]], + description="The shapes of the images latents, used for RoPE calculation", + ), + OutputParam( + name="txt_seq_lens", + kwargs_type="denoiser_input_fields", + type_hint=List[int], + description="The sequence lengths of the prompt embeds, used for RoPE calculation", + ), + OutputParam( + name="negative_txt_seq_lens", + kwargs_type="denoiser_input_fields", + type_hint=List[int], + description="The sequence lengths of the negative prompt embeds, used for RoPE calculation", + ), + ] + + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + # for edit, image size can be different from the target size (height/width) + image = ( + block_state.resized_image[0] if isinstance(block_state.resized_image, list) else block_state.resized_image + ) + image_width, image_height = image.size + + block_state.img_shapes = [ + [ + ( + 1, + block_state.height // components.vae_scale_factor // 2, + block_state.width // components.vae_scale_factor // 2, + ), + (1, image_height // components.vae_scale_factor // 2, image_width // components.vae_scale_factor // 2), + ] + ] * block_state.batch_size + + block_state.txt_seq_lens = ( + block_state.prompt_embeds_mask.sum(dim=1).tolist() if block_state.prompt_embeds_mask is not None else None + ) + block_state.negative_txt_seq_lens = ( + block_state.negative_prompt_embeds_mask.sum(dim=1).tolist() + if block_state.negative_prompt_embeds_mask is not None + else None + ) + + self.set_block_state(state, block_state) + + return components, state + + +## ControlNet inputs for denoiser +class QwenImageControlNetBeforeDenoiserStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("controlnet", QwenImageControlNetModel), + ] + + @property + def description(self) -> str: + return "step that prepare inputs for controlnet. Insert before the Denoise Step, after set_timesteps step." + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam("control_guidance_start", default=0.0), + InputParam("control_guidance_end", default=1.0), + InputParam("controlnet_conditioning_scale", default=1.0), + InputParam("control_image_latents", required=True), + InputParam( + "timesteps", + required=True, + type_hint=torch.Tensor, + description="The timesteps to use for the denoising process. 
Can be generated in set_timesteps step.", + ), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam("controlnet_keep", type_hint=List[float], description="The controlnet keep values"), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + controlnet = unwrap_module(components.controlnet) + + # control_guidance_start/control_guidance_end (align format) + if not isinstance(block_state.control_guidance_start, list) and isinstance( + block_state.control_guidance_end, list + ): + block_state.control_guidance_start = len(block_state.control_guidance_end) * [ + block_state.control_guidance_start + ] + elif not isinstance(block_state.control_guidance_end, list) and isinstance( + block_state.control_guidance_start, list + ): + block_state.control_guidance_end = len(block_state.control_guidance_start) * [ + block_state.control_guidance_end + ] + elif not isinstance(block_state.control_guidance_start, list) and not isinstance( + block_state.control_guidance_end, list + ): + mult = ( + len(block_state.control_image_latents) if isinstance(controlnet, QwenImageMultiControlNetModel) else 1 + ) + block_state.control_guidance_start, block_state.control_guidance_end = ( + mult * [block_state.control_guidance_start], + mult * [block_state.control_guidance_end], + ) + + # controlnet_conditioning_scale (align format) + if isinstance(controlnet, QwenImageMultiControlNetModel) and isinstance( + block_state.controlnet_conditioning_scale, float + ): + block_state.controlnet_conditioning_scale = [block_state.controlnet_conditioning_scale] * mult + + # controlnet_keep + block_state.controlnet_keep = [] + for i in range(len(block_state.timesteps)): + keeps = [ + 1.0 - float(i / len(block_state.timesteps) < s or (i + 1) / len(block_state.timesteps) > e) + for s, e in zip(block_state.control_guidance_start, block_state.control_guidance_end) + ] + block_state.controlnet_keep.append(keeps[0] if isinstance(controlnet, QwenImageControlNetModel) else keeps) + + self.set_block_state(state, block_state) + + return components, state diff --git a/src/diffusers/modular_pipelines/qwenimage/decoders.py b/src/diffusers/modular_pipelines/qwenimage/decoders.py new file mode 100644 index 0000000000..6c82fe989e --- /dev/null +++ b/src/diffusers/modular_pipelines/qwenimage/decoders.py @@ -0,0 +1,203 @@ +# Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
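Looking back at `QwenImageControlNetBeforeDenoiserStep` above: `controlnet_keep` is a per-timestep on/off schedule derived from `control_guidance_start`/`control_guidance_end`, and during the denoise loop it is multiplied into `controlnet_conditioning_scale` to form `cond_scale`. A minimal, dependency-free sketch of that schedule, with illustrative values:

```python
def controlnet_keep_schedule(num_steps, control_guidance_start=0.0, control_guidance_end=1.0):
    # 1.0 while the step's normalized position lies inside [start, end], else 0.0,
    # mirroring the list comprehension in the block above (single-ControlNet case).
    keeps = []
    for i in range(num_steps):
        outside = i / num_steps < control_guidance_start or (i + 1) / num_steps > control_guidance_end
        keeps.append(0.0 if outside else 1.0)
    return keeps

# Apply the ControlNet only during the first 60% of a 10-step schedule.
print(controlnet_keep_schedule(10, control_guidance_end=0.6))
# -> [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0]
```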
+ +from typing import List, Union + +import numpy as np +import PIL +import torch + +from ...configuration_utils import FrozenDict +from ...image_processor import InpaintProcessor, VaeImageProcessor +from ...models import AutoencoderKLQwenImage +from ...utils import logging +from ..modular_pipeline import ModularPipelineBlocks, PipelineState +from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam +from .modular_pipeline import QwenImageModularPipeline, QwenImagePachifier + + +logger = logging.get_logger(__name__) + + +class QwenImageDecoderStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Step that decodes the latents to images" + + @property + def expected_components(self) -> List[ComponentSpec]: + components = [ + ComponentSpec("vae", AutoencoderKLQwenImage), + ComponentSpec("pachifier", QwenImagePachifier, default_creation_method="from_config"), + ] + + return components + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam(name="height", required=True), + InputParam(name="width", required=True), + InputParam( + name="latents", + required=True, + type_hint=torch.Tensor, + description="The latents to decode, can be generated in the denoise step", + ), + ] + + @property + def intermediate_outputs(self) -> List[str]: + return [ + OutputParam( + "images", + type_hint=Union[List[PIL.Image.Image], List[torch.Tensor], List[np.array]], + description="The generated images, can be a PIL.Image.Image, torch.Tensor or a numpy array", + ) + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + # YiYi Notes: remove support for output_type = "latents', we can just skip decode/encode step in modular + block_state.latents = components.pachifier.unpack_latents( + block_state.latents, block_state.height, block_state.width + ) + block_state.latents = block_state.latents.to(components.vae.dtype) + + latents_mean = ( + torch.tensor(components.vae.config.latents_mean) + .view(1, components.vae.config.z_dim, 1, 1, 1) + .to(block_state.latents.device, block_state.latents.dtype) + ) + latents_std = 1.0 / torch.tensor(components.vae.config.latents_std).view( + 1, components.vae.config.z_dim, 1, 1, 1 + ).to(block_state.latents.device, block_state.latents.dtype) + block_state.latents = block_state.latents / latents_std + latents_mean + block_state.images = components.vae.decode(block_state.latents, return_dict=False)[0][:, :, 0] + + self.set_block_state(state, block_state) + return components, state + + +class QwenImageProcessImagesOutputStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "postprocess the generated image" + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec( + "image_processor", + VaeImageProcessor, + config=FrozenDict({"vae_scale_factor": 16}), + default_creation_method="from_config", + ), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam("images", required=True, description="the generated image from decoders step"), + InputParam( + name="output_type", + default="pil", + type_hint=str, + description="The type of the output images, can be 'pil', 'np', 'pt'", + ), + ] + + @staticmethod + def check_inputs(output_type): + if output_type not in ["pil", "np", "pt"]: + raise ValueError(f"Invalid output_type: {output_type}") + + @torch.no_grad() + def 
__call__(self, components: QwenImageModularPipeline, state: PipelineState): + block_state = self.get_block_state(state) + + self.check_inputs(block_state.output_type) + + block_state.images = components.image_processor.postprocess( + image=block_state.images, + output_type=block_state.output_type, + ) + + self.set_block_state(state, block_state) + return components, state + + +class QwenImageInpaintProcessImagesOutputStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "postprocess the generated image, optional apply the mask overally to the original image.." + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec( + "image_mask_processor", + InpaintProcessor, + config=FrozenDict({"vae_scale_factor": 16}), + default_creation_method="from_config", + ), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam("images", required=True, description="the generated image from decoders step"), + InputParam( + name="output_type", + default="pil", + type_hint=str, + description="The type of the output images, can be 'pil', 'np', 'pt'", + ), + InputParam("mask_overlay_kwargs"), + ] + + @staticmethod + def check_inputs(output_type, mask_overlay_kwargs): + if output_type not in ["pil", "np", "pt"]: + raise ValueError(f"Invalid output_type: {output_type}") + + if mask_overlay_kwargs and output_type != "pil": + raise ValueError("only support output_type 'pil' for mask overlay") + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState): + block_state = self.get_block_state(state) + + self.check_inputs(block_state.output_type, block_state.mask_overlay_kwargs) + + if block_state.mask_overlay_kwargs is None: + mask_overlay_kwargs = {} + else: + mask_overlay_kwargs = block_state.mask_overlay_kwargs + + block_state.images = components.image_mask_processor.postprocess( + image=block_state.images, + **mask_overlay_kwargs, + ) + + self.set_block_state(state, block_state) + return components, state diff --git a/src/diffusers/modular_pipelines/qwenimage/denoise.py b/src/diffusers/modular_pipelines/qwenimage/denoise.py new file mode 100644 index 0000000000..d0704ee6e0 --- /dev/null +++ b/src/diffusers/modular_pipelines/qwenimage/denoise.py @@ -0,0 +1,668 @@ +# Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
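A small numeric check of the de-normalization used in `QwenImageDecoderStep` above. Because `latents_std` is built as the reciprocal of the configured std, `latents / latents_std + latents_mean` exactly undoes the encoder-side `(x - mean) / std`. The shapes and the 16-channel `z_dim` below are illustrative assumptions:

```python
import torch

z_dim = 16
latents_mean = torch.randn(1, z_dim, 1, 1, 1)
std = torch.rand(1, z_dim, 1, 1, 1) + 0.5            # per-channel std from the VAE config
latents_std = 1.0 / std                               # stored as a reciprocal, as in the block

x = torch.randn(2, z_dim, 1, 32, 32)                  # stand-in for raw VAE latents
normalized = (x - latents_mean) / std                 # what the encoder-side normalization produces
recovered = normalized / latents_std + latents_mean   # what the decoder step applies

print(torch.allclose(recovered, x, atol=1e-5))        # True: the two transforms cancel
```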
+ +from typing import List, Tuple + +import torch + +from ...configuration_utils import FrozenDict +from ...guiders import ClassifierFreeGuidance +from ...models import QwenImageControlNetModel, QwenImageTransformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import logging +from ..modular_pipeline import BlockState, LoopSequentialPipelineBlocks, ModularPipelineBlocks, PipelineState +from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam +from .modular_pipeline import QwenImageModularPipeline + + +logger = logging.get_logger(__name__) + + +class QwenImageLoopBeforeDenoiser(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return ( + "step within the denoising loop that prepares the latent input for the denoiser. " + "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " + "object (e.g. `QwenImageDenoiseLoopWrapper`)" + ) + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam( + "latents", + required=True, + type_hint=torch.Tensor, + description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.", + ), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor): + # one timestep + block_state.timestep = t.expand(block_state.latents.shape[0]).to(block_state.latents.dtype) + block_state.latent_model_input = block_state.latents + return components, block_state + + +class QwenImageEditLoopBeforeDenoiser(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return ( + "step within the denoising loop that prepares the latent input for the denoiser. " + "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " + "object (e.g. `QwenImageDenoiseLoopWrapper`)" + ) + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam( + "latents", + required=True, + type_hint=torch.Tensor, + description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.", + ), + InputParam( + "image_latents", + required=True, + type_hint=torch.Tensor, + description="The initial image latents to use for the denoising process. Can be encoded in vae_encoder step and packed in prepare_image_latents step.", + ), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor): + # one timestep + + block_state.latent_model_input = torch.cat([block_state.latents, block_state.image_latents], dim=1) + block_state.timestep = t.expand(block_state.latents.shape[0]).to(block_state.latents.dtype) + return components, block_state + + +class QwenImageLoopBeforeDenoiserControlNet(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec( + "guider", + ClassifierFreeGuidance, + config=FrozenDict({"guidance_scale": 4.0}), + default_creation_method="from_config", + ), + ComponentSpec("controlnet", QwenImageControlNetModel), + ] + + @property + def description(self) -> str: + return ( + "step within the denoising loop that runs the controlnet before the denoiser. " + "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " + "object (e.g. 
`QwenImageDenoiseLoopWrapper`)" + ) + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam( + "control_image_latents", + required=True, + type_hint=torch.Tensor, + description="The control image to use for the denoising process. Can be generated in prepare_controlnet_inputs step.", + ), + InputParam( + "controlnet_conditioning_scale", + type_hint=float, + description="The controlnet conditioning scale value to use for the denoising process. Can be generated in prepare_controlnet_inputs step.", + ), + InputParam( + "controlnet_keep", + required=True, + type_hint=List[float], + description="The controlnet keep values to use for the denoising process. Can be generated in prepare_controlnet_inputs step.", + ), + InputParam( + "num_inference_steps", + required=True, + type_hint=int, + description="The number of inference steps to use for the denoising process. Can be generated in set_timesteps step.", + ), + InputParam( + kwargs_type="denoiser_input_fields", + description=( + "All conditional model inputs for the denoiser. " + "It should contain prompt_embeds/negative_prompt_embeds, txt_seq_lens/negative_txt_seq_lens." + ), + ), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, block_state: BlockState, i: int, t: int): + # cond_scale for the timestep (controlnet input) + if isinstance(block_state.controlnet_keep[i], list): + block_state.cond_scale = [ + c * s for c, s in zip(block_state.controlnet_conditioning_scale, block_state.controlnet_keep[i]) + ] + else: + controlnet_cond_scale = block_state.controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + block_state.cond_scale = controlnet_cond_scale * block_state.controlnet_keep[i] + + # run controlnet for the guidance batch + controlnet_block_samples = components.controlnet( + hidden_states=block_state.latent_model_input, + controlnet_cond=block_state.control_image_latents, + conditioning_scale=block_state.cond_scale, + timestep=block_state.timestep / 1000, + img_shapes=block_state.img_shapes, + encoder_hidden_states=block_state.prompt_embeds, + encoder_hidden_states_mask=block_state.prompt_embeds_mask, + txt_seq_lens=block_state.txt_seq_lens, + return_dict=False, + ) + + block_state.additional_cond_kwargs["controlnet_block_samples"] = controlnet_block_samples + + return components, block_state + + +class QwenImageLoopDenoiser(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return ( + "step within the denoising loop that denoise the latent input for the denoiser. " + "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " + "object (e.g. `QwenImageDenoiseLoopWrapper`)" + ) + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec( + "guider", + ClassifierFreeGuidance, + config=FrozenDict({"guidance_scale": 4.0}), + default_creation_method="from_config", + ), + ComponentSpec("transformer", QwenImageTransformer2DModel), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam("attention_kwargs"), + InputParam( + "latents", + required=True, + type_hint=torch.Tensor, + description="The latents to use for the denoising process. Can be generated in prepare_latents step.", + ), + InputParam( + "num_inference_steps", + required=True, + type_hint=int, + description="The number of inference steps to use for the denoising process. 
Can be generated in set_timesteps step.", + ), + InputParam( + kwargs_type="denoiser_input_fields", + description="conditional model inputs for the denoiser: e.g. prompt_embeds, negative_prompt_embeds, etc.", + ), + InputParam( + "img_shapes", + required=True, + type_hint=List[Tuple[int, int]], + description="The shape of the image latents for RoPE calculation. Can be generated in prepare_additional_inputs step.", + ), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor): + guider_input_fields = { + "encoder_hidden_states": ("prompt_embeds", "negative_prompt_embeds"), + "encoder_hidden_states_mask": ("prompt_embeds_mask", "negative_prompt_embeds_mask"), + "txt_seq_lens": ("txt_seq_lens", "negative_txt_seq_lens"), + } + + components.guider.set_state(step=i, num_inference_steps=block_state.num_inference_steps, timestep=t) + guider_state = components.guider.prepare_inputs(block_state, guider_input_fields) + + for guider_state_batch in guider_state: + components.guider.prepare_models(components.transformer) + cond_kwargs = guider_state_batch.as_dict() + cond_kwargs = {k: v for k, v in cond_kwargs.items() if k in guider_input_fields} + + # YiYi TODO: add cache context + guider_state_batch.noise_pred = components.transformer( + hidden_states=block_state.latent_model_input, + timestep=block_state.timestep / 1000, + img_shapes=block_state.img_shapes, + attention_kwargs=block_state.attention_kwargs, + return_dict=False, + **cond_kwargs, + **block_state.additional_cond_kwargs, + )[0] + + components.guider.cleanup_models(components.transformer) + + guider_output = components.guider(guider_state) + + # apply guidance rescale + pred_cond_norm = torch.norm(guider_output.pred_cond, dim=-1, keepdim=True) + pred_norm = torch.norm(guider_output.pred, dim=-1, keepdim=True) + block_state.noise_pred = guider_output.pred * (pred_cond_norm / pred_norm) + + return components, block_state + + +class QwenImageEditLoopDenoiser(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return ( + "step within the denoising loop that denoise the latent input for the denoiser. " + "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " + "object (e.g. `QwenImageDenoiseLoopWrapper`)" + ) + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec( + "guider", + ClassifierFreeGuidance, + config=FrozenDict({"guidance_scale": 4.0}), + default_creation_method="from_config", + ), + ComponentSpec("transformer", QwenImageTransformer2DModel), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam("attention_kwargs"), + InputParam( + "latents", + required=True, + type_hint=torch.Tensor, + description="The latents to use for the denoising process. Can be generated in prepare_latents step.", + ), + InputParam( + "num_inference_steps", + required=True, + type_hint=int, + description="The number of inference steps to use for the denoising process. Can be generated in set_timesteps step.", + ), + InputParam( + kwargs_type="denoiser_input_fields", + description="conditional model inputs for the denoiser: e.g. prompt_embeds, negative_prompt_embeds, etc.", + ), + InputParam( + "img_shapes", + required=True, + type_hint=List[Tuple[int, int]], + description="The shape of the image latents for RoPE calculation. 
Can be generated in prepare_additional_inputs step.", + ), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor): + guider_input_fields = { + "encoder_hidden_states": ("prompt_embeds", "negative_prompt_embeds"), + "encoder_hidden_states_mask": ("prompt_embeds_mask", "negative_prompt_embeds_mask"), + "txt_seq_lens": ("txt_seq_lens", "negative_txt_seq_lens"), + } + + components.guider.set_state(step=i, num_inference_steps=block_state.num_inference_steps, timestep=t) + guider_state = components.guider.prepare_inputs(block_state, guider_input_fields) + + for guider_state_batch in guider_state: + components.guider.prepare_models(components.transformer) + cond_kwargs = guider_state_batch.as_dict() + cond_kwargs = {k: v for k, v in cond_kwargs.items() if k in guider_input_fields} + + # YiYi TODO: add cache context + guider_state_batch.noise_pred = components.transformer( + hidden_states=block_state.latent_model_input, + timestep=block_state.timestep / 1000, + img_shapes=block_state.img_shapes, + attention_kwargs=block_state.attention_kwargs, + return_dict=False, + **cond_kwargs, + **block_state.additional_cond_kwargs, + )[0] + + components.guider.cleanup_models(components.transformer) + + guider_output = components.guider(guider_state) + + pred = guider_output.pred[:, : block_state.latents.size(1)] + pred_cond = guider_output.pred_cond[:, : block_state.latents.size(1)] + + # apply guidance rescale + pred_cond_norm = torch.norm(pred_cond, dim=-1, keepdim=True) + pred_norm = torch.norm(pred, dim=-1, keepdim=True) + block_state.noise_pred = pred * (pred_cond_norm / pred_norm) + + return components, block_state + + +class QwenImageLoopAfterDenoiser(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return ( + "step within the denoising loop that updates the latents. " + "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " + "object (e.g. `QwenImageDenoiseLoopWrapper`)" + ) + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam("latents", type_hint=torch.Tensor, description="The denoised latents."), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor): + latents_dtype = block_state.latents.dtype + block_state.latents = components.scheduler.step( + block_state.noise_pred, + t, + block_state.latents, + return_dict=False, + )[0] + + if block_state.latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + block_state.latents = block_state.latents.to(latents_dtype) + + return components, block_state + + +class QwenImageLoopAfterDenoiserInpaint(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return ( + "step within the denoising loop that updates the latents using mask and image_latents for inpainting. " + "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " + "object (e.g. 
`QwenImageDenoiseLoopWrapper`)" + ) + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam( + "mask", + required=True, + type_hint=torch.Tensor, + description="The mask to use for the inpainting process. Can be generated in inpaint prepare latents step.", + ), + InputParam( + "image_latents", + required=True, + type_hint=torch.Tensor, + description="The image latents to use for the inpainting process. Can be generated in inpaint prepare latents step.", + ), + InputParam( + "initial_noise", + required=True, + type_hint=torch.Tensor, + description="The initial noise to use for the inpainting process. Can be generated in inpaint prepare latents step.", + ), + InputParam( + "timesteps", + required=True, + type_hint=torch.Tensor, + description="The timesteps to use for the denoising process. Can be generated in set_timesteps step.", + ), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor): + block_state.init_latents_proper = block_state.image_latents + if i < len(block_state.timesteps) - 1: + block_state.noise_timestep = block_state.timesteps[i + 1] + block_state.init_latents_proper = components.scheduler.scale_noise( + block_state.init_latents_proper, torch.tensor([block_state.noise_timestep]), block_state.initial_noise + ) + + block_state.latents = ( + 1 - block_state.mask + ) * block_state.init_latents_proper + block_state.mask * block_state.latents + + return components, block_state + + +class QwenImageDenoiseLoopWrapper(LoopSequentialPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return ( + "Pipeline block that iteratively denoise the latents over `timesteps`. " + "The specific steps with each iteration can be customized with `sub_blocks` attributes" + ) + + @property + def loop_expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), + ] + + @property + def loop_inputs(self) -> List[InputParam]: + return [ + InputParam( + "timesteps", + required=True, + type_hint=torch.Tensor, + description="The timesteps to use for the denoising process. Can be generated in set_timesteps step.", + ), + InputParam( + "num_inference_steps", + required=True, + type_hint=int, + description="The number of inference steps to use for the denoising process. 
Can be generated in set_timesteps step.", + ), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + block_state.num_warmup_steps = max( + len(block_state.timesteps) - block_state.num_inference_steps * components.scheduler.order, 0 + ) + + block_state.additional_cond_kwargs = {} + + with self.progress_bar(total=block_state.num_inference_steps) as progress_bar: + for i, t in enumerate(block_state.timesteps): + components, block_state = self.loop_step(components, block_state, i=i, t=t) + if i == len(block_state.timesteps) - 1 or ( + (i + 1) > block_state.num_warmup_steps and (i + 1) % components.scheduler.order == 0 + ): + progress_bar.update() + + self.set_block_state(state, block_state) + + return components, state + + +# composing the denoising loops +class QwenImageDenoiseStep(QwenImageDenoiseLoopWrapper): + block_classes = [ + QwenImageLoopBeforeDenoiser, + QwenImageLoopDenoiser, + QwenImageLoopAfterDenoiser, + ] + block_names = ["before_denoiser", "denoiser", "after_denoiser"] + + @property + def description(self) -> str: + return ( + "Denoise step that iteratively denoise the latents. \n" + "Its loop logic is defined in `QwenImageDenoiseLoopWrapper.__call__` method \n" + "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + " - `QwenImageLoopBeforeDenoiser`\n" + " - `QwenImageLoopDenoiser`\n" + " - `QwenImageLoopAfterDenoiser`\n" + "This block supports text2image and image2image tasks for QwenImage." + ) + + +# composing the inpainting denoising loops +class QwenImageInpaintDenoiseStep(QwenImageDenoiseLoopWrapper): + block_classes = [ + QwenImageLoopBeforeDenoiser, + QwenImageLoopDenoiser, + QwenImageLoopAfterDenoiser, + QwenImageLoopAfterDenoiserInpaint, + ] + block_names = ["before_denoiser", "denoiser", "after_denoiser", "after_denoiser_inpaint"] + + @property + def description(self) -> str: + return ( + "Denoise step that iteratively denoise the latents. \n" + "Its loop logic is defined in `QwenImageDenoiseLoopWrapper.__call__` method \n" + "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + " - `QwenImageLoopBeforeDenoiser`\n" + " - `QwenImageLoopDenoiser`\n" + " - `QwenImageLoopAfterDenoiser`\n" + " - `QwenImageLoopAfterDenoiserInpaint`\n" + "This block supports inpainting tasks for QwenImage." + ) + + +# composing the controlnet denoising loops +class QwenImageControlNetDenoiseStep(QwenImageDenoiseLoopWrapper): + block_classes = [ + QwenImageLoopBeforeDenoiser, + QwenImageLoopBeforeDenoiserControlNet, + QwenImageLoopDenoiser, + QwenImageLoopAfterDenoiser, + ] + block_names = ["before_denoiser", "before_denoiser_controlnet", "denoiser", "after_denoiser"] + + @property + def description(self) -> str: + return ( + "Denoise step that iteratively denoise the latents. \n" + "Its loop logic is defined in `QwenImageDenoiseLoopWrapper.__call__` method \n" + "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + " - `QwenImageLoopBeforeDenoiser`\n" + " - `QwenImageLoopBeforeDenoiserControlNet`\n" + " - `QwenImageLoopDenoiser`\n" + " - `QwenImageLoopAfterDenoiser`\n" + "This block supports text2img/img2img tasks with controlnet for QwenImage." 
+ ) + + +# composing the controlnet denoising loops +class QwenImageInpaintControlNetDenoiseStep(QwenImageDenoiseLoopWrapper): + block_classes = [ + QwenImageLoopBeforeDenoiser, + QwenImageLoopBeforeDenoiserControlNet, + QwenImageLoopDenoiser, + QwenImageLoopAfterDenoiser, + QwenImageLoopAfterDenoiserInpaint, + ] + block_names = [ + "before_denoiser", + "before_denoiser_controlnet", + "denoiser", + "after_denoiser", + "after_denoiser_inpaint", + ] + + @property + def description(self) -> str: + return ( + "Denoise step that iteratively denoise the latents. \n" + "Its loop logic is defined in `QwenImageDenoiseLoopWrapper.__call__` method \n" + "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + " - `QwenImageLoopBeforeDenoiser`\n" + " - `QwenImageLoopBeforeDenoiserControlNet`\n" + " - `QwenImageLoopDenoiser`\n" + " - `QwenImageLoopAfterDenoiser`\n" + " - `QwenImageLoopAfterDenoiserInpaint`\n" + "This block supports inpainting tasks with controlnet for QwenImage." + ) + + +# composing the denoising loops +class QwenImageEditDenoiseStep(QwenImageDenoiseLoopWrapper): + block_classes = [ + QwenImageEditLoopBeforeDenoiser, + QwenImageEditLoopDenoiser, + QwenImageLoopAfterDenoiser, + ] + block_names = ["before_denoiser", "denoiser", "after_denoiser"] + + @property + def description(self) -> str: + return ( + "Denoise step that iteratively denoise the latents. \n" + "Its loop logic is defined in `QwenImageDenoiseLoopWrapper.__call__` method \n" + "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + " - `QwenImageEditLoopBeforeDenoiser`\n" + " - `QwenImageEditLoopDenoiser`\n" + " - `QwenImageLoopAfterDenoiser`\n" + "This block supports QwenImage Edit." + ) + + +class QwenImageEditInpaintDenoiseStep(QwenImageDenoiseLoopWrapper): + block_classes = [ + QwenImageEditLoopBeforeDenoiser, + QwenImageEditLoopDenoiser, + QwenImageLoopAfterDenoiser, + QwenImageLoopAfterDenoiserInpaint, + ] + block_names = ["before_denoiser", "denoiser", "after_denoiser", "after_denoiser_inpaint"] + + @property + def description(self) -> str: + return ( + "Denoise step that iteratively denoise the latents. \n" + "Its loop logic is defined in `QwenImageDenoiseLoopWrapper.__call__` method \n" + "At each iteration, it runs blocks defined in `sub_blocks` sequencially:\n" + " - `QwenImageEditLoopBeforeDenoiser`\n" + " - `QwenImageEditLoopDenoiser`\n" + " - `QwenImageLoopAfterDenoiser`\n" + " - `QwenImageLoopAfterDenoiserInpaint`\n" + "This block supports inpainting tasks for QwenImage Edit." + ) diff --git a/src/diffusers/modular_pipelines/qwenimage/encoders.py b/src/diffusers/modular_pipelines/qwenimage/encoders.py new file mode 100644 index 0000000000..280fa6a152 --- /dev/null +++ b/src/diffusers/modular_pipelines/qwenimage/encoders.py @@ -0,0 +1,857 @@ +# Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
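Before the encoder blocks that follow, here is a small sketch of the norm-preserving rescale that `QwenImageLoopDenoiser` (and its Edit variant) applies to the guider output above: the guided prediction is rescaled so that each token keeps the conditional prediction's norm, which prevents classifier-free guidance from inflating the magnitude of the velocity estimate. The plain CFG combination shown here is only for illustration; in the pipeline the combined prediction comes from the `ClassifierFreeGuidance` guider component.

```python
import torch

def rescale_guided_pred(pred: torch.Tensor, pred_cond: torch.Tensor) -> torch.Tensor:
    # Rescale the guided prediction so each token keeps the conditional prediction's norm.
    pred_cond_norm = torch.norm(pred_cond, dim=-1, keepdim=True)
    pred_norm = torch.norm(pred, dim=-1, keepdim=True)
    return pred * (pred_cond_norm / pred_norm)

pred_cond = torch.randn(1, 1024, 64)     # conditional velocity prediction (illustrative shape)
pred_uncond = torch.randn(1, 1024, 64)   # unconditional velocity prediction
guidance_scale = 4.0
pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond)  # standard CFG combine, for illustration

noise_pred = rescale_guided_pred(pred, pred_cond)
print(torch.allclose(noise_pred.norm(dim=-1), pred_cond.norm(dim=-1), atol=1e-3))  # True
```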
+ +from typing import Dict, List, Optional, Union + +import PIL +import torch +from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor + +from ...configuration_utils import FrozenDict +from ...guiders import ClassifierFreeGuidance +from ...image_processor import InpaintProcessor, VaeImageProcessor, is_valid_image, is_valid_image_imagelist +from ...models import AutoencoderKLQwenImage, QwenImageControlNetModel, QwenImageMultiControlNetModel +from ...pipelines.qwenimage.pipeline_qwenimage_edit import calculate_dimensions +from ...utils import logging +from ...utils.torch_utils import unwrap_module +from ..modular_pipeline import ModularPipelineBlocks, PipelineState +from ..modular_pipeline_utils import ComponentSpec, ConfigSpec, InputParam, OutputParam +from .modular_pipeline import QwenImageModularPipeline + + +logger = logging.get_logger(__name__) + + +def _extract_masked_hidden(hidden_states: torch.Tensor, mask: torch.Tensor): + bool_mask = mask.bool() + valid_lengths = bool_mask.sum(dim=1) + selected = hidden_states[bool_mask] + split_result = torch.split(selected, valid_lengths.tolist(), dim=0) + return split_result + + +def get_qwen_prompt_embeds( + text_encoder, + tokenizer, + prompt: Union[str, List[str]] = None, + prompt_template_encode: str = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n", + prompt_template_encode_start_idx: int = 34, + tokenizer_max_length: int = 1024, + device: Optional[torch.device] = None, +): + prompt = [prompt] if isinstance(prompt, str) else prompt + + template = prompt_template_encode + drop_idx = prompt_template_encode_start_idx + txt = [template.format(e) for e in prompt] + txt_tokens = tokenizer( + txt, max_length=tokenizer_max_length + drop_idx, padding=True, truncation=True, return_tensors="pt" + ).to(device) + encoder_hidden_states = text_encoder( + input_ids=txt_tokens.input_ids, + attention_mask=txt_tokens.attention_mask, + output_hidden_states=True, + ) + hidden_states = encoder_hidden_states.hidden_states[-1] + + split_hidden_states = _extract_masked_hidden(hidden_states, txt_tokens.attention_mask) + split_hidden_states = [e[drop_idx:] for e in split_hidden_states] + attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in split_hidden_states] + max_seq_len = max([e.size(0) for e in split_hidden_states]) + prompt_embeds = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states] + ) + encoder_attention_mask = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list] + ) + + prompt_embeds = prompt_embeds.to(device=device) + + return prompt_embeds, encoder_attention_mask + + +def get_qwen_prompt_embeds_edit( + text_encoder, + processor, + prompt: Union[str, List[str]] = None, + image: Optional[torch.Tensor] = None, + prompt_template_encode: str = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. 
Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n", + prompt_template_encode_start_idx: int = 64, + device: Optional[torch.device] = None, +): + prompt = [prompt] if isinstance(prompt, str) else prompt + + template = prompt_template_encode + drop_idx = prompt_template_encode_start_idx + txt = [template.format(e) for e in prompt] + + model_inputs = processor( + text=txt, + images=image, + padding=True, + return_tensors="pt", + ).to(device) + + outputs = text_encoder( + input_ids=model_inputs.input_ids, + attention_mask=model_inputs.attention_mask, + pixel_values=model_inputs.pixel_values, + image_grid_thw=model_inputs.image_grid_thw, + output_hidden_states=True, + ) + + hidden_states = outputs.hidden_states[-1] + split_hidden_states = _extract_masked_hidden(hidden_states, model_inputs.attention_mask) + split_hidden_states = [e[drop_idx:] for e in split_hidden_states] + attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in split_hidden_states] + max_seq_len = max([e.size(0) for e in split_hidden_states]) + prompt_embeds = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states] + ) + encoder_attention_mask = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list] + ) + + prompt_embeds = prompt_embeds.to(device=device) + + return prompt_embeds, encoder_attention_mask + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Modified from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._encode_vae_image +def encode_vae_image( + image: torch.Tensor, + vae: AutoencoderKLQwenImage, + generator: torch.Generator, + device: torch.device, + dtype: torch.dtype, + latent_channels: int = 16, + sample_mode: str = "argmax", +): + if not isinstance(image, torch.Tensor): + raise ValueError(f"Expected image to be a tensor, got {type(image)}.") + + # preprocessed image should be a 4D tensor: batch_size, num_channels, height, width + if image.dim() == 4: + image = image.unsqueeze(2) + elif image.dim() != 5: + raise ValueError(f"Expected image dims 4 or 5, got {image.dim()}.") + + image = image.to(device=device, dtype=dtype) + + if isinstance(generator, list): + image_latents = [ + retrieve_latents(vae.encode(image[i : i + 1]), generator=generator[i], sample_mode=sample_mode) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(vae.encode(image), generator=generator, sample_mode=sample_mode) + latents_mean = ( + torch.tensor(vae.config.latents_mean) + .view(1, latent_channels, 1, 1, 1) + .to(image_latents.device, image_latents.dtype) + ) + latents_std = ( + torch.tensor(vae.config.latents_std) + .view(1, latent_channels, 1, 1, 1) 
+ .to(image_latents.device, image_latents.dtype) + ) + image_latents = (image_latents - latents_mean) / latents_std + + return image_latents + + +class QwenImageEditResizeDynamicStep(ModularPipelineBlocks): + model_name = "qwenimage" + + def __init__(self, input_name: str = "image", output_name: str = "resized_image"): + """Create a configurable step for resizing images to the target area (1024 * 1024) while maintaining the aspect ratio. + + This block resizes an input image tensor and exposes the resized result under configurable input and output + names. Use this when you need to wire the resize step to different image fields (e.g., "image", + "control_image") + + Args: + input_name (str, optional): Name of the image field to read from the + pipeline state. Defaults to "image". + output_name (str, optional): Name of the resized image field to write + back to the pipeline state. Defaults to "resized_image". + """ + if not isinstance(input_name, str) or not isinstance(output_name, str): + raise ValueError( + f"input_name and output_name must be strings but are {type(input_name)} and {type(output_name)}" + ) + self._image_input_name = input_name + self._resized_image_output_name = output_name + super().__init__() + + @property + def description(self) -> str: + return f"Image Resize step that resize the {self._image_input_name} to the target area (1024 * 1024) while maintaining the aspect ratio." + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec( + "image_resize_processor", + VaeImageProcessor, + config=FrozenDict({"vae_scale_factor": 16}), + default_creation_method="from_config", + ), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam( + name=self._image_input_name, required=True, type_hint=torch.Tensor, description="The image to resize" + ), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + name=self._resized_image_output_name, type_hint=List[PIL.Image.Image], description="The resized images" + ), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState): + block_state = self.get_block_state(state) + + images = getattr(block_state, self._image_input_name) + + if not is_valid_image_imagelist(images): + raise ValueError(f"Images must be image or list of images but are {type(images)}") + + if is_valid_image(images): + images = [images] + + image_width, image_height = images[0].size + calculated_width, calculated_height, _ = calculate_dimensions(1024 * 1024, image_width / image_height) + + resized_images = [ + components.image_resize_processor.resize(image, height=calculated_height, width=calculated_width) + for image in images + ] + + setattr(block_state, self._resized_image_output_name, resized_images) + self.set_block_state(state, block_state) + return components, state + + +class QwenImageTextEncoderStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Text Encoder step that generate text_embeddings to guide the image generation" + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("text_encoder", Qwen2_5_VLForConditionalGeneration, description="The text encoder to use"), + ComponentSpec("tokenizer", Qwen2Tokenizer, description="The tokenizer to use"), + ComponentSpec( + "guider", + ClassifierFreeGuidance, + config=FrozenDict({"guidance_scale": 4.0}), + default_creation_method="from_config", + ), + ] + + @property + 
def expected_configs(self) -> List[ConfigSpec]: + return [ + ConfigSpec( + name="prompt_template_encode", + default="<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n", + ), + ConfigSpec(name="prompt_template_encode_start_idx", default=34), + ConfigSpec(name="tokenizer_max_length", default=1024), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam(name="prompt", required=True, type_hint=str, description="The prompt to encode"), + InputParam(name="negative_prompt", type_hint=str, description="The negative prompt to encode"), + InputParam( + name="max_sequence_length", type_hint=int, description="The max sequence length to use", default=1024 + ), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + name="prompt_embeds", + kwargs_type="denoiser_input_fields", + type_hint=torch.Tensor, + description="The prompt embeddings", + ), + OutputParam( + name="prompt_embeds_mask", + kwargs_type="denoiser_input_fields", + type_hint=torch.Tensor, + description="The encoder attention mask", + ), + OutputParam( + name="negative_prompt_embeds", + kwargs_type="denoiser_input_fields", + type_hint=torch.Tensor, + description="The negative prompt embeddings", + ), + OutputParam( + name="negative_prompt_embeds_mask", + kwargs_type="denoiser_input_fields", + type_hint=torch.Tensor, + description="The negative prompt embeddings mask", + ), + ] + + @staticmethod + def check_inputs(prompt, negative_prompt, max_sequence_length): + if not isinstance(prompt, str) and not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if ( + negative_prompt is not None + and not isinstance(negative_prompt, str) + and not isinstance(negative_prompt, list) + ): + raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") + + if max_sequence_length is not None and max_sequence_length > 1024: + raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}") + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState): + block_state = self.get_block_state(state) + + device = components._execution_device + self.check_inputs(block_state.prompt, block_state.negative_prompt, block_state.max_sequence_length) + + block_state.prompt_embeds, block_state.prompt_embeds_mask = get_qwen_prompt_embeds( + components.text_encoder, + components.tokenizer, + prompt=block_state.prompt, + prompt_template_encode=components.config.prompt_template_encode, + prompt_template_encode_start_idx=components.config.prompt_template_encode_start_idx, + tokenizer_max_length=components.config.tokenizer_max_length, + device=device, + ) + + block_state.prompt_embeds = block_state.prompt_embeds[:, : block_state.max_sequence_length] + block_state.prompt_embeds_mask = block_state.prompt_embeds_mask[:, : block_state.max_sequence_length] + + if components.requires_unconditional_embeds: + negative_prompt = block_state.negative_prompt or "" + block_state.negative_prompt_embeds, block_state.negative_prompt_embeds_mask = get_qwen_prompt_embeds( + components.text_encoder, + components.tokenizer, + prompt=negative_prompt, + prompt_template_encode=components.config.prompt_template_encode, + 
prompt_template_encode_start_idx=components.config.prompt_template_encode_start_idx, + tokenizer_max_length=components.config.tokenizer_max_length, + device=device, + ) + block_state.negative_prompt_embeds = block_state.negative_prompt_embeds[ + :, : block_state.max_sequence_length + ] + block_state.negative_prompt_embeds_mask = block_state.negative_prompt_embeds_mask[ + :, : block_state.max_sequence_length + ] + + self.set_block_state(state, block_state) + return components, state + + +class QwenImageEditTextEncoderStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Text Encoder step that processes both prompt and image together to generate text embeddings for guiding image generation" + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("text_encoder", Qwen2_5_VLForConditionalGeneration), + ComponentSpec("processor", Qwen2VLProcessor), + ComponentSpec( + "guider", + ClassifierFreeGuidance, + config=FrozenDict({"guidance_scale": 4.0}), + default_creation_method="from_config", + ), + ] + + @property + def expected_configs(self) -> List[ConfigSpec]: + return [ + ConfigSpec( + name="prompt_template_encode", + default="<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n", + ), + ConfigSpec(name="prompt_template_encode_start_idx", default=64), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam(name="prompt", required=True, type_hint=str, description="The prompt to encode"), + InputParam(name="negative_prompt", type_hint=str, description="The negative prompt to encode"), + InputParam( + name="resized_image", + required=True, + type_hint=torch.Tensor, + description="The image prompt to encode, should be resized using resize step", + ), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + name="prompt_embeds", + kwargs_type="denoiser_input_fields", + type_hint=torch.Tensor, + description="The prompt embeddings", + ), + OutputParam( + name="prompt_embeds_mask", + kwargs_type="denoiser_input_fields", + type_hint=torch.Tensor, + description="The encoder attention mask", + ), + OutputParam( + name="negative_prompt_embeds", + kwargs_type="denoiser_input_fields", + type_hint=torch.Tensor, + description="The negative prompt embeddings", + ), + OutputParam( + name="negative_prompt_embeds_mask", + kwargs_type="denoiser_input_fields", + type_hint=torch.Tensor, + description="The negative prompt embeddings mask", + ), + ] + + @staticmethod + def check_inputs(prompt, negative_prompt): + if not isinstance(prompt, str) and not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if ( + negative_prompt is not None + and not isinstance(negative_prompt, str) + and not isinstance(negative_prompt, list) + ): + raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState): + block_state = self.get_block_state(state) + + 
self.check_inputs(block_state.prompt, block_state.negative_prompt) + + device = components._execution_device + + block_state.prompt_embeds, block_state.prompt_embeds_mask = get_qwen_prompt_embeds_edit( + components.text_encoder, + components.processor, + prompt=block_state.prompt, + image=block_state.resized_image, + prompt_template_encode=components.config.prompt_template_encode, + prompt_template_encode_start_idx=components.config.prompt_template_encode_start_idx, + device=device, + ) + + if components.requires_unconditional_embeds: + negative_prompt = block_state.negative_prompt or "" + block_state.negative_prompt_embeds, block_state.negative_prompt_embeds_mask = get_qwen_prompt_embeds_edit( + components.text_encoder, + components.processor, + prompt=negative_prompt, + image=block_state.resized_image, + prompt_template_encode=components.config.prompt_template_encode, + prompt_template_encode_start_idx=components.config.prompt_template_encode_start_idx, + device=device, + ) + + self.set_block_state(state, block_state) + return components, state + + +class QwenImageInpaintProcessImagesInputStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Image Preprocess step for inpainting task. This processes the image and mask inputs together. Images can be resized first using QwenImageEditResizeDynamicStep." + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec( + "image_mask_processor", + InpaintProcessor, + config=FrozenDict({"vae_scale_factor": 16}), + default_creation_method="from_config", + ), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam("mask_image", required=True), + InputParam("resized_image"), + InputParam("image"), + InputParam("height"), + InputParam("width"), + InputParam("padding_mask_crop"), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam(name="processed_image"), + OutputParam(name="processed_mask_image"), + OutputParam( + name="mask_overlay_kwargs", + type_hint=Dict, + description="The kwargs for the postprocess step to apply the mask overlay", + ), + ] + + @staticmethod + def check_inputs(height, width, vae_scale_factor): + if height is not None and height % (vae_scale_factor * 2) != 0: + raise ValueError(f"Height must be divisible by {vae_scale_factor * 2} but is {height}") + + if width is not None and width % (vae_scale_factor * 2) != 0: + raise ValueError(f"Width must be divisible by {vae_scale_factor * 2} but is {width}") + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState): + block_state = self.get_block_state(state) + + if block_state.resized_image is None and block_state.image is None: + raise ValueError("resized_image and image cannot be None at the same time") + + if block_state.resized_image is None: + image = block_state.image + self.check_inputs( + height=block_state.height, width=block_state.width, vae_scale_factor=components.vae_scale_factor + ) + height = block_state.height or components.default_height + width = block_state.width or components.default_width + else: + width, height = block_state.resized_image[0].size + image = block_state.resized_image + + block_state.processed_image, block_state.processed_mask_image, block_state.mask_overlay_kwargs = ( + components.image_mask_processor.preprocess( + image=image, + mask=block_state.mask_image, + height=height, + width=width, + padding_mask_crop=block_state.padding_mask_crop, + ) + ) + 
+ self.set_block_state(state, block_state) + return components, state + + +class QwenImageProcessImagesInputStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "Image Preprocess step. Images can be resized first using QwenImageEditResizeDynamicStep." + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec( + "image_processor", + VaeImageProcessor, + config=FrozenDict({"vae_scale_factor": 16}), + default_creation_method="from_config", + ), + ] + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam("resized_image"), + InputParam("image"), + InputParam("height"), + InputParam("width"), + ] + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam(name="processed_image"), + ] + + @staticmethod + def check_inputs(height, width, vae_scale_factor): + if height is not None and height % (vae_scale_factor * 2) != 0: + raise ValueError(f"Height must be divisible by {vae_scale_factor * 2} but is {height}") + + if width is not None and width % (vae_scale_factor * 2) != 0: + raise ValueError(f"Width must be divisible by {vae_scale_factor * 2} but is {width}") + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState): + block_state = self.get_block_state(state) + + if block_state.resized_image is None and block_state.image is None: + raise ValueError("resized_image and image cannot be None at the same time") + + if block_state.resized_image is None: + image = block_state.image + self.check_inputs( + height=block_state.height, width=block_state.width, vae_scale_factor=components.vae_scale_factor + ) + height = block_state.height or components.default_height + width = block_state.width or components.default_width + else: + width, height = block_state.resized_image[0].size + image = block_state.resized_image + + block_state.processed_image = components.image_processor.preprocess( + image=image, + height=height, + width=width, + ) + + self.set_block_state(state, block_state) + return components, state + + +class QwenImageVaeEncoderDynamicStep(ModularPipelineBlocks): + model_name = "qwenimage" + + def __init__( + self, + input_name: str = "processed_image", + output_name: str = "image_latents", + ): + """Initialize a VAE encoder step for converting images to latent representations. + + Both the input and output names are configurable so this block can be configured to process to different image + inputs (e.g., "processed_image" -> "image_latents", "processed_control_image" -> "control_image_latents"). + + Args: + input_name (str, optional): Name of the input image tensor. Defaults to "processed_image". + Examples: "processed_image" or "processed_control_image" + output_name (str, optional): Name of the output latent tensor. Defaults to "image_latents". 
+ Examples: "image_latents" or "control_image_latents" + + Examples: + # Basic usage with default settings (includes image processor) QwenImageVaeEncoderDynamicStep() + + # Custom input/output names for control image QwenImageVaeEncoderDynamicStep( + input_name="processed_control_image", output_name="control_image_latents" + ) + """ + self._image_input_name = input_name + self._image_latents_output_name = output_name + super().__init__() + + @property + def description(self) -> str: + return f"Dynamic VAE Encoder step that converts {self._image_input_name} into latent representations {self._image_latents_output_name}.\n" + + @property + def expected_components(self) -> List[ComponentSpec]: + components = [ + ComponentSpec("vae", AutoencoderKLQwenImage), + ] + return components + + @property + def inputs(self) -> List[InputParam]: + inputs = [ + InputParam(self._image_input_name, required=True), + InputParam("generator"), + ] + return inputs + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + self._image_latents_output_name, + type_hint=torch.Tensor, + description="The latents representing the reference image", + ) + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + device = components._execution_device + dtype = components.vae.dtype + + image = getattr(block_state, self._image_input_name) + + # Encode image into latents + image_latents = encode_vae_image( + image=image, + vae=components.vae, + generator=block_state.generator, + device=device, + dtype=dtype, + latent_channels=components.num_channels_latents, + ) + + setattr(block_state, self._image_latents_output_name, image_latents) + + self.set_block_state(state, block_state) + + return components, state + + +class QwenImageControlNetVaeEncoderStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "VAE Encoder step that converts `control_image` into latent representations control_image_latents.\n" + + @property + def expected_components(self) -> List[ComponentSpec]: + components = [ + ComponentSpec("vae", AutoencoderKLQwenImage), + ComponentSpec("controlnet", QwenImageControlNetModel), + ComponentSpec( + "control_image_processor", + VaeImageProcessor, + config=FrozenDict({"vae_scale_factor": 16}), + default_creation_method="from_config", + ), + ] + return components + + @property + def inputs(self) -> List[InputParam]: + inputs = [ + InputParam("control_image", required=True), + InputParam("height"), + InputParam("width"), + InputParam("generator"), + ] + return inputs + + @property + def intermediate_outputs(self) -> List[OutputParam]: + return [ + OutputParam( + "control_image_latents", + type_hint=torch.Tensor, + description="The latents representing the control image", + ) + ] + + @staticmethod + def check_inputs(height, width, vae_scale_factor): + if height is not None and height % (vae_scale_factor * 2) != 0: + raise ValueError(f"Height must be divisible by {vae_scale_factor * 2} but is {height}") + + if width is not None and width % (vae_scale_factor * 2) != 0: + raise ValueError(f"Width must be divisible by {vae_scale_factor * 2} but is {width}") + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + self.check_inputs(block_state.height, block_state.width, components.vae_scale_factor) + + device = 
components._execution_device + dtype = components.vae.dtype + + height = block_state.height or components.default_height + width = block_state.width or components.default_width + + controlnet = unwrap_module(components.controlnet) + if isinstance(controlnet, QwenImageMultiControlNetModel) and not isinstance(block_state.control_image, list): + block_state.control_image = [block_state.control_image] + + if isinstance(controlnet, QwenImageMultiControlNetModel): + block_state.control_image_latents = [] + for control_image_ in block_state.control_image: + control_image_ = components.control_image_processor.preprocess( + image=control_image_, + height=height, + width=width, + ) + + control_image_latents_ = encode_vae_image( + image=control_image_, + vae=components.vae, + generator=block_state.generator, + device=device, + dtype=dtype, + latent_channels=components.num_channels_latents, + sample_mode="sample", + ) + block_state.control_image_latents.append(control_image_latents_) + + elif isinstance(controlnet, QwenImageControlNetModel): + control_image = components.control_image_processor.preprocess( + image=block_state.control_image, + height=height, + width=width, + ) + block_state.control_image_latents = encode_vae_image( + image=control_image, + vae=components.vae, + generator=block_state.generator, + device=device, + dtype=dtype, + latent_channels=components.num_channels_latents, + sample_mode="sample", + ) + + else: + raise ValueError( + f"Expected controlnet to be a QwenImageControlNetModel or QwenImageMultiControlNetModel, got {type(controlnet)}" + ) + + self.set_block_state(state, block_state) + + return components, state diff --git a/src/diffusers/modular_pipelines/qwenimage/inputs.py b/src/diffusers/modular_pipelines/qwenimage/inputs.py new file mode 100644 index 0000000000..2b787c8238 --- /dev/null +++ b/src/diffusers/modular_pipelines/qwenimage/inputs.py @@ -0,0 +1,431 @@ +# Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Tuple + +import torch + +from ...models import QwenImageMultiControlNetModel +from ..modular_pipeline import ModularPipelineBlocks, PipelineState +from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam +from .modular_pipeline import QwenImageModularPipeline, QwenImagePachifier + + +def repeat_tensor_to_batch_size( + input_name: str, + input_tensor: torch.Tensor, + batch_size: int, + num_images_per_prompt: int = 1, +) -> torch.Tensor: + """Repeat tensor elements to match the final batch size. + + This function expands a tensor's batch dimension to match the final batch size (batch_size * num_images_per_prompt) + by repeating each element along dimension 0. + + The input tensor must have batch size 1 or batch_size. 
The function will: + - If batch size is 1: repeat each element (batch_size * num_images_per_prompt) times + - If batch size equals batch_size: repeat each element num_images_per_prompt times + + Args: + input_name (str): Name of the input tensor (used for error messages) + input_tensor (torch.Tensor): The tensor to repeat. Must have batch size 1 or batch_size. + batch_size (int): The base batch size (number of prompts) + num_images_per_prompt (int, optional): Number of images to generate per prompt. Defaults to 1. + + Returns: + torch.Tensor: The repeated tensor with final batch size (batch_size * num_images_per_prompt) + + Raises: + ValueError: If input_tensor is not a torch.Tensor or has invalid batch size + + Examples: + tensor = torch.tensor([[1, 2, 3]]) # shape: [1, 3] repeated = repeat_tensor_to_batch_size("image", tensor, + batch_size=2, num_images_per_prompt=2) repeated # tensor([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) - shape: + [4, 3] + + tensor = torch.tensor([[1, 2, 3], [4, 5, 6]]) # shape: [2, 3] repeated = repeat_tensor_to_batch_size("image", + tensor, batch_size=2, num_images_per_prompt=2) repeated # tensor([[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) + - shape: [4, 3] + """ + # make sure input is a tensor + if not isinstance(input_tensor, torch.Tensor): + raise ValueError(f"`{input_name}` must be a tensor") + + # make sure input tensor e.g. image_latents has batch size 1 or batch_size same as prompts + if input_tensor.shape[0] == 1: + repeat_by = batch_size * num_images_per_prompt + elif input_tensor.shape[0] == batch_size: + repeat_by = num_images_per_prompt + else: + raise ValueError( + f"`{input_name}` must have have batch size 1 or {batch_size}, but got {input_tensor.shape[0]}" + ) + + # expand the tensor to match the batch_size * num_images_per_prompt + input_tensor = input_tensor.repeat_interleave(repeat_by, dim=0) + + return input_tensor + + +def calculate_dimension_from_latents(latents: torch.Tensor, vae_scale_factor: int) -> Tuple[int, int]: + """Calculate image dimensions from latent tensor dimensions. + + This function converts latent space dimensions to image space dimensions by multiplying the latent height and width + by the VAE scale factor. + + Args: + latents (torch.Tensor): The latent tensor. Must have 4 or 5 dimensions. + Expected shapes: [batch, channels, height, width] or [batch, channels, frames, height, width] + vae_scale_factor (int): The scale factor used by the VAE to compress images. + Typically 8 for most VAEs (image is 8x larger than latents in each dimension) + + Returns: + Tuple[int, int]: The calculated image dimensions as (height, width) + + Raises: + ValueError: If latents tensor doesn't have 4 or 5 dimensions + + """ + # make sure the latents are not packed + if latents.ndim != 4 and latents.ndim != 5: + raise ValueError(f"unpacked latents must have 4 or 5 dimensions, but got {latents.ndim}") + + latent_height, latent_width = latents.shape[-2:] + + height = latent_height * vae_scale_factor + width = latent_width * vae_scale_factor + + return height, width + + +class QwenImageTextInputsStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + summary_section = ( + "Text input processing step that standardizes text embeddings for the pipeline.\n" + "This step:\n" + " 1. Determines `batch_size` and `dtype` based on `prompt_embeds`\n" + " 2. 
Ensures all text embeddings have consistent batch sizes (batch_size * num_images_per_prompt)" + ) + + # Placement guidance + placement_section = "\n\nThis block should be placed after all encoder steps to process the text embeddings before they are used in subsequent pipeline steps." + + return summary_section + placement_section + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam(name="num_images_per_prompt", default=1), + InputParam(name="prompt_embeds", required=True, kwargs_type="denoiser_input_fields"), + InputParam(name="prompt_embeds_mask", required=True, kwargs_type="denoiser_input_fields"), + InputParam(name="negative_prompt_embeds", kwargs_type="denoiser_input_fields"), + InputParam(name="negative_prompt_embeds_mask", kwargs_type="denoiser_input_fields"), + ] + + @property + def intermediate_outputs(self) -> List[str]: + return [ + OutputParam( + "batch_size", + type_hint=int, + description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt", + ), + OutputParam( + "dtype", + type_hint=torch.dtype, + description="Data type of model tensor inputs (determined by `prompt_embeds`)", + ), + ] + + @staticmethod + def check_inputs( + prompt_embeds, + prompt_embeds_mask, + negative_prompt_embeds, + negative_prompt_embeds_mask, + ): + if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None: + raise ValueError("`negative_prompt_embeds_mask` is required when `negative_prompt_embeds` is not None") + + if negative_prompt_embeds is None and negative_prompt_embeds_mask is not None: + raise ValueError("cannot pass `negative_prompt_embeds_mask` without `negative_prompt_embeds`") + + if prompt_embeds_mask.shape[0] != prompt_embeds.shape[0]: + raise ValueError("`prompt_embeds_mask` must have the same batch size as `prompt_embeds`") + + elif negative_prompt_embeds is not None and negative_prompt_embeds.shape[0] != prompt_embeds.shape[0]: + raise ValueError("`negative_prompt_embeds` must have the same batch size as `prompt_embeds`") + + elif ( + negative_prompt_embeds_mask is not None and negative_prompt_embeds_mask.shape[0] != prompt_embeds.shape[0] + ): + raise ValueError("`negative_prompt_embeds_mask` must have the same batch size as `prompt_embeds`") + + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + self.check_inputs( + prompt_embeds=block_state.prompt_embeds, + prompt_embeds_mask=block_state.prompt_embeds_mask, + negative_prompt_embeds=block_state.negative_prompt_embeds, + negative_prompt_embeds_mask=block_state.negative_prompt_embeds_mask, + ) + + block_state.batch_size = block_state.prompt_embeds.shape[0] + block_state.dtype = block_state.prompt_embeds.dtype + + _, seq_len, _ = block_state.prompt_embeds.shape + + block_state.prompt_embeds = block_state.prompt_embeds.repeat(1, block_state.num_images_per_prompt, 1) + block_state.prompt_embeds = block_state.prompt_embeds.view( + block_state.batch_size * block_state.num_images_per_prompt, seq_len, -1 + ) + + block_state.prompt_embeds_mask = block_state.prompt_embeds_mask.repeat(1, block_state.num_images_per_prompt, 1) + block_state.prompt_embeds_mask = block_state.prompt_embeds_mask.view( + block_state.batch_size * block_state.num_images_per_prompt, seq_len + ) + + if block_state.negative_prompt_embeds is not None: + _, seq_len, _ = block_state.negative_prompt_embeds.shape + block_state.negative_prompt_embeds = block_state.negative_prompt_embeds.repeat( 
+ 1, block_state.num_images_per_prompt, 1 + ) + block_state.negative_prompt_embeds = block_state.negative_prompt_embeds.view( + block_state.batch_size * block_state.num_images_per_prompt, seq_len, -1 + ) + + block_state.negative_prompt_embeds_mask = block_state.negative_prompt_embeds_mask.repeat( + 1, block_state.num_images_per_prompt, 1 + ) + block_state.negative_prompt_embeds_mask = block_state.negative_prompt_embeds_mask.view( + block_state.batch_size * block_state.num_images_per_prompt, seq_len + ) + + self.set_block_state(state, block_state) + + return components, state + + +class QwenImageInputsDynamicStep(ModularPipelineBlocks): + model_name = "qwenimage" + + def __init__( + self, + image_latent_inputs: List[str] = ["image_latents"], + additional_batch_inputs: List[str] = [], + ): + """Initialize a configurable step that standardizes the inputs for the denoising step. It:\n" + + This step handles multiple common tasks to prepare inputs for the denoising step: + 1. For encoded image latents, use it update height/width if None, patchifies, and expands batch size + 2. For additional_batch_inputs: Only expands batch dimensions to match final batch size + + This is a dynamic block that allows you to configure which inputs to process. + + Args: + image_latent_inputs (List[str], optional): Names of image latent tensors to process. + These will be used to determine height/width, patchified, and batch-expanded. Can be a single string or + list of strings. Defaults to ["image_latents"]. Examples: ["image_latents"], ["control_image_latents"] + additional_batch_inputs (List[str], optional): + Names of additional conditional input tensors to expand batch size. These tensors will only have their + batch dimensions adjusted to match the final batch size. Can be a single string or list of strings. + Defaults to []. Examples: ["processed_mask_image"] + + Examples: + # Configure to process image_latents (default behavior) QwenImageInputsDynamicStep() + + # Configure to process multiple image latent inputs + QwenImageInputsDynamicStep(image_latent_inputs=["image_latents", "control_image_latents"]) + + # Configure to process image latents and additional batch inputs QwenImageInputsDynamicStep( + image_latent_inputs=["image_latents"], additional_batch_inputs=["processed_mask_image"] + ) + """ + if not isinstance(image_latent_inputs, list): + image_latent_inputs = [image_latent_inputs] + if not isinstance(additional_batch_inputs, list): + additional_batch_inputs = [additional_batch_inputs] + + self._image_latent_inputs = image_latent_inputs + self._additional_batch_inputs = additional_batch_inputs + super().__init__() + + @property + def description(self) -> str: + # Functionality section + summary_section = ( + "Input processing step that:\n" + " 1. For image latent inputs: Updates height/width if None, patchifies latents, and expands batch size\n" + " 2. For additional batch inputs: Expands batch dimensions to match final batch size" + ) + + # Inputs info + inputs_info = "" + if self._image_latent_inputs or self._additional_batch_inputs: + inputs_info = "\n\nConfigured inputs:" + if self._image_latent_inputs: + inputs_info += f"\n - Image latent inputs: {self._image_latent_inputs}" + if self._additional_batch_inputs: + inputs_info += f"\n - Additional batch inputs: {self._additional_batch_inputs}" + + # Placement guidance + placement_section = "\n\nThis block should be placed after the encoder steps and the text input step." 
+ + return summary_section + inputs_info + placement_section + + @property + def inputs(self) -> List[InputParam]: + inputs = [ + InputParam(name="num_images_per_prompt", default=1), + InputParam(name="batch_size", required=True), + InputParam(name="height"), + InputParam(name="width"), + ] + + # Add image latent inputs + for image_latent_input_name in self._image_latent_inputs: + inputs.append(InputParam(name=image_latent_input_name)) + + # Add additional batch inputs + for input_name in self._additional_batch_inputs: + inputs.append(InputParam(name=input_name)) + + return inputs + + @property + def expected_components(self) -> List[ComponentSpec]: + return [ + ComponentSpec("pachifier", QwenImagePachifier, default_creation_method="from_config"), + ] + + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + # Process image latent inputs (height/width calculation, patchify, and batch expansion) + for image_latent_input_name in self._image_latent_inputs: + image_latent_tensor = getattr(block_state, image_latent_input_name) + if image_latent_tensor is None: + continue + + # 1. Calculate height/width from latents + height, width = calculate_dimension_from_latents(image_latent_tensor, components.vae_scale_factor) + block_state.height = block_state.height or height + block_state.width = block_state.width or width + + # 2. Patchify the image latent tensor + image_latent_tensor = components.pachifier.pack_latents(image_latent_tensor) + + # 3. Expand batch size + image_latent_tensor = repeat_tensor_to_batch_size( + input_name=image_latent_input_name, + input_tensor=image_latent_tensor, + num_images_per_prompt=block_state.num_images_per_prompt, + batch_size=block_state.batch_size, + ) + + setattr(block_state, image_latent_input_name, image_latent_tensor) + + # Process additional batch inputs (only batch expansion) + for input_name in self._additional_batch_inputs: + input_tensor = getattr(block_state, input_name) + if input_tensor is None: + continue + + # Only expand batch size + input_tensor = repeat_tensor_to_batch_size( + input_name=input_name, + input_tensor=input_tensor, + num_images_per_prompt=block_state.num_images_per_prompt, + batch_size=block_state.batch_size, + ) + + setattr(block_state, input_name, input_tensor) + + self.set_block_state(state, block_state) + return components, state + + +class QwenImageControlNetInputsStep(ModularPipelineBlocks): + model_name = "qwenimage" + + @property + def description(self) -> str: + return "prepare the `control_image_latents` for controlnet. Insert after all the other inputs steps." + + @property + def inputs(self) -> List[InputParam]: + return [ + InputParam(name="control_image_latents", required=True), + InputParam(name="batch_size", required=True), + InputParam(name="num_images_per_prompt", default=1), + InputParam(name="height"), + InputParam(name="width"), + ] + + @torch.no_grad() + def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState: + block_state = self.get_block_state(state) + + if isinstance(components.controlnet, QwenImageMultiControlNetModel): + control_image_latents = [] + # loop through each control_image_latents + for i, control_image_latents_ in enumerate(block_state.control_image_latents): + # 1. 
update height/width if not provided + height, width = calculate_dimension_from_latents(control_image_latents_, components.vae_scale_factor) + block_state.height = block_state.height or height + block_state.width = block_state.width or width + + # 2. pack + control_image_latents_ = components.pachifier.pack_latents(control_image_latents_) + + # 3. repeat to match the batch size + control_image_latents_ = repeat_tensor_to_batch_size( + input_name=f"control_image_latents[{i}]", + input_tensor=control_image_latents_, + num_images_per_prompt=block_state.num_images_per_prompt, + batch_size=block_state.batch_size, + ) + + control_image_latents.append(control_image_latents_) + + block_state.control_image_latents = control_image_latents + + else: + # 1. update height/width if not provided + height, width = calculate_dimension_from_latents( + block_state.control_image_latents, components.vae_scale_factor + ) + block_state.height = block_state.height or height + block_state.width = block_state.width or width + + # 2. pack + block_state.control_image_latents = components.pachifier.pack_latents(block_state.control_image_latents) + + # 3. repeat to match the batch size + block_state.control_image_latents = repeat_tensor_to_batch_size( + input_name="control_image_latents", + input_tensor=block_state.control_image_latents, + num_images_per_prompt=block_state.num_images_per_prompt, + batch_size=block_state.batch_size, + ) + + block_state.control_image_latents = block_state.control_image_latents + + self.set_block_state(state, block_state) + + return components, state diff --git a/src/diffusers/modular_pipelines/qwenimage/modular_blocks.py b/src/diffusers/modular_pipelines/qwenimage/modular_blocks.py new file mode 100644 index 0000000000..a01c742fcf --- /dev/null +++ b/src/diffusers/modular_pipelines/qwenimage/modular_blocks.py @@ -0,0 +1,841 @@ +# Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ...utils import logging +from ..modular_pipeline import AutoPipelineBlocks, SequentialPipelineBlocks +from ..modular_pipeline_utils import InsertableDict +from .before_denoise import ( + QwenImageControlNetBeforeDenoiserStep, + QwenImageCreateMaskLatentsStep, + QwenImageEditRoPEInputsStep, + QwenImagePrepareLatentsStep, + QwenImagePrepareLatentsWithStrengthStep, + QwenImageRoPEInputsStep, + QwenImageSetTimestepsStep, + QwenImageSetTimestepsWithStrengthStep, +) +from .decoders import QwenImageDecoderStep, QwenImageInpaintProcessImagesOutputStep, QwenImageProcessImagesOutputStep +from .denoise import ( + QwenImageControlNetDenoiseStep, + QwenImageDenoiseStep, + QwenImageEditDenoiseStep, + QwenImageEditInpaintDenoiseStep, + QwenImageInpaintControlNetDenoiseStep, + QwenImageInpaintDenoiseStep, + QwenImageLoopBeforeDenoiserControlNet, +) +from .encoders import ( + QwenImageControlNetVaeEncoderStep, + QwenImageEditResizeDynamicStep, + QwenImageEditTextEncoderStep, + QwenImageInpaintProcessImagesInputStep, + QwenImageProcessImagesInputStep, + QwenImageTextEncoderStep, + QwenImageVaeEncoderDynamicStep, +) +from .inputs import QwenImageControlNetInputsStep, QwenImageInputsDynamicStep, QwenImageTextInputsStep + + +logger = logging.get_logger(__name__) + +# 1. QwenImage + +## 1.1 QwenImage/text2image + +#### QwenImage/decode +#### (standard decode step works for most tasks except for inpaint) +QwenImageDecodeBlocks = InsertableDict( + [ + ("decode", QwenImageDecoderStep()), + ("postprocess", QwenImageProcessImagesOutputStep()), + ] +) + + +class QwenImageDecodeStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageDecodeBlocks.values() + block_names = QwenImageDecodeBlocks.keys() + + @property + def description(self): + return "Decode step that decodes the latents to images and postprocess the generated image." + + +#### QwenImage/text2image presets +TEXT2IMAGE_BLOCKS = InsertableDict( + [ + ("text_encoder", QwenImageTextEncoderStep()), + ("input", QwenImageTextInputsStep()), + ("prepare_latents", QwenImagePrepareLatentsStep()), + ("set_timesteps", QwenImageSetTimestepsStep()), + ("prepare_rope_inputs", QwenImageRoPEInputsStep()), + ("denoise", QwenImageDenoiseStep()), + ("decode", QwenImageDecodeStep()), + ] +) + + +## 1.2 QwenImage/inpaint + +#### QwenImage/inpaint vae encoder +QwenImageInpaintVaeEncoderBlocks = InsertableDict( + [ + ( + "preprocess", + QwenImageInpaintProcessImagesInputStep, + ), # image, mask_image -> processed_image, processed_mask_image, mask_overlay_kwargs + ("encode", QwenImageVaeEncoderDynamicStep()), # processed_image -> image_latents + ] +) + + +class QwenImageInpaintVaeEncoderStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageInpaintVaeEncoderBlocks.values() + block_names = QwenImageInpaintVaeEncoderBlocks.keys() + + @property + def description(self) -> str: + return ( + "This step is used for processing image and mask inputs for inpainting tasks. It:\n" + " - Resizes the image to the target size, based on `height` and `width`.\n" + " - Processes and updates `image` and `mask_image`.\n" + " - Creates `image_latents`." 
+ ) + + +#### QwenImage/inpaint inputs +QwenImageInpaintInputBlocks = InsertableDict( + [ + ("text_inputs", QwenImageTextInputsStep()), # default step to process text embeddings + ( + "additional_inputs", + QwenImageInputsDynamicStep( + image_latent_inputs=["image_latents"], additional_batch_inputs=["processed_mask_image"] + ), + ), + ] +) + + +class QwenImageInpaintInputStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageInpaintInputBlocks.values() + block_names = QwenImageInpaintInputBlocks.keys() + + @property + def description(self): + return "Input step that prepares the inputs for the inpainting denoising step. It:\n" + " - make sure the text embeddings have consistent batch size as well as the additional inputs (`image_latents` and `processed_mask_image`).\n" + " - update height/width based `image_latents`, patchify `image_latents`." + + +# QwenImage/inpaint prepare latents +QwenImageInpaintPrepareLatentsBlocks = InsertableDict( + [ + ("add_noise_to_latents", QwenImagePrepareLatentsWithStrengthStep()), + ("create_mask_latents", QwenImageCreateMaskLatentsStep()), + ] +) + + +class QwenImageInpaintPrepareLatentsStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageInpaintPrepareLatentsBlocks.values() + block_names = QwenImageInpaintPrepareLatentsBlocks.keys() + + @property + def description(self) -> str: + return ( + "This step prepares the latents/image_latents and mask inputs for the inpainting denoising step. It:\n" + " - Add noise to the image latents to create the latents input for the denoiser.\n" + " - Create the pachified latents `mask` based on the processedmask image.\n" + ) + + +#### QwenImage/inpaint decode +QwenImageInpaintDecodeBlocks = InsertableDict( + [ + ("decode", QwenImageDecoderStep()), + ("postprocess", QwenImageInpaintProcessImagesOutputStep()), + ] +) + + +class QwenImageInpaintDecodeStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageInpaintDecodeBlocks.values() + block_names = QwenImageInpaintDecodeBlocks.keys() + + @property + def description(self): + return "Decode step that decodes the latents to images and postprocess the generated image, optional apply the mask overally to the original image." + + +#### QwenImage/inpaint presets +INPAINT_BLOCKS = InsertableDict( + [ + ("text_encoder", QwenImageTextEncoderStep()), + ("vae_encoder", QwenImageInpaintVaeEncoderStep()), + ("input", QwenImageInpaintInputStep()), + ("prepare_latents", QwenImagePrepareLatentsStep()), + ("set_timesteps", QwenImageSetTimestepsWithStrengthStep()), + ("prepare_inpaint_latents", QwenImageInpaintPrepareLatentsStep()), + ("prepare_rope_inputs", QwenImageRoPEInputsStep()), + ("denoise", QwenImageInpaintDenoiseStep()), + ("decode", QwenImageInpaintDecodeStep()), + ] +) + + +## 1.3 QwenImage/img2img + +#### QwenImage/img2img vae encoder +QwenImageImg2ImgVaeEncoderBlocks = InsertableDict( + [ + ("preprocess", QwenImageProcessImagesInputStep()), + ("encode", QwenImageVaeEncoderDynamicStep()), + ] +) + + +class QwenImageImg2ImgVaeEncoderStep(SequentialPipelineBlocks): + model_name = "qwenimage" + + block_classes = QwenImageImg2ImgVaeEncoderBlocks.values() + block_names = QwenImageImg2ImgVaeEncoderBlocks.keys() + + @property + def description(self) -> str: + return "Vae encoder step that preprocess andencode the image inputs into their latent representations." 
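For reference, a hypothetical usage sketch of the inpaint preset defined above. The `from_blocks_dict`, `init_pipeline`, and `load_default_components` names follow the modular-pipelines API used elsewhere in diffusers, and the `Qwen/Qwen-Image` checkpoint id, the `strength` argument, and the placeholder image URLs are assumptions rather than part of this diff:

```python
import torch
from diffusers.modular_pipelines import SequentialPipelineBlocks
from diffusers.utils import load_image

# Assemble the inpaint preset into a runnable modular pipeline (API names assumed, see note above).
blocks = SequentialPipelineBlocks.from_blocks_dict(INPAINT_BLOCKS)
pipe = blocks.init_pipeline("Qwen/Qwen-Image")            # checkpoint id is an assumption
pipe.load_default_components(torch_dtype=torch.bfloat16)  # assumed loading helper
pipe.to("cuda")

init_image = load_image("https://example.com/dog.png")       # placeholder URL
mask_image = load_image("https://example.com/dog_mask.png")  # white = regions to repaint

image = pipe(
    prompt="a corgi wearing a red scarf",
    image=init_image,
    mask_image=mask_image,
    strength=0.9,     # consumed by the strength-aware set_timesteps / prepare_latents steps
    output="images",  # request the postprocessed images from the pipeline state
)[0]
```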
+ + +#### QwenImage/img2img inputs +QwenImageImg2ImgInputBlocks = InsertableDict( + [ + ("text_inputs", QwenImageTextInputsStep()), # default step to process text embeddings + ("additional_inputs", QwenImageInputsDynamicStep(image_latent_inputs=["image_latents"])), + ] +) + + +class QwenImageImg2ImgInputStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageImg2ImgInputBlocks.values() + block_names = QwenImageImg2ImgInputBlocks.keys() + + @property + def description(self): + return "Input step that prepares the inputs for the img2img denoising step. It:\n" + " - make sure the text embeddings have consistent batch size as well as the additional inputs (`image_latents`).\n" + " - update height/width based `image_latents`, patchify `image_latents`." + + +#### QwenImage/img2img presets +IMAGE2IMAGE_BLOCKS = InsertableDict( + [ + ("text_encoder", QwenImageTextEncoderStep()), + ("vae_encoder", QwenImageImg2ImgVaeEncoderStep()), + ("input", QwenImageImg2ImgInputStep()), + ("prepare_latents", QwenImagePrepareLatentsStep()), + ("set_timesteps", QwenImageSetTimestepsWithStrengthStep()), + ("prepare_img2img_latents", QwenImagePrepareLatentsWithStrengthStep()), + ("prepare_rope_inputs", QwenImageRoPEInputsStep()), + ("denoise", QwenImageDenoiseStep()), + ("decode", QwenImageDecodeStep()), + ] +) + + +## 1.4 QwenImage/controlnet + +#### QwenImage/controlnet presets +CONTROLNET_BLOCKS = InsertableDict( + [ + ("controlnet_vae_encoder", QwenImageControlNetVaeEncoderStep()), # vae encoder step for control_image + ("controlnet_inputs", QwenImageControlNetInputsStep()), # additional input step for controlnet + ( + "controlnet_before_denoise", + QwenImageControlNetBeforeDenoiserStep(), + ), # before denoise step (after set_timesteps step) + ( + "controlnet_denoise_loop_before", + QwenImageLoopBeforeDenoiserControlNet(), + ), # controlnet loop step (insert before the denoiseloop_denoiser) + ] +) + + +## 1.5 QwenImage/auto encoders + + +#### for inpaint and img2img tasks +class QwenImageAutoVaeEncoderStep(AutoPipelineBlocks): + block_classes = [QwenImageInpaintVaeEncoderStep, QwenImageImg2ImgVaeEncoderStep] + block_names = ["inpaint", "img2img"] + block_trigger_inputs = ["mask_image", "image"] + + @property + def description(self): + return ( + "Vae encoder step that encode the image inputs into their latent representations.\n" + + "This is an auto pipeline block.\n" + + " - `QwenImageInpaintVaeEncoderStep` (inpaint) is used when `mask_image` is provided.\n" + + " - `QwenImageImg2ImgVaeEncoderStep` (img2img) is used when `image` is provided.\n" + + " - if `mask_image` or `image` is not provided, step will be skipped." + ) + + +# for controlnet tasks +class QwenImageOptionalControlNetVaeEncoderStep(AutoPipelineBlocks): + block_classes = [QwenImageControlNetVaeEncoderStep] + block_names = ["controlnet"] + block_trigger_inputs = ["control_image"] + + @property + def description(self): + return ( + "Vae encoder step that encode the image inputs into their latent representations.\n" + + "This is an auto pipeline block.\n" + + " - `QwenImageControlNetVaeEncoderStep` (controlnet) is used when `control_image` is provided.\n" + + " - if `control_image` is not provided, step will be skipped." 
+ ) + + +## 1.6 QwenImage/auto inputs + + +# text2image/inpaint/img2img +class QwenImageAutoInputStep(AutoPipelineBlocks): + block_classes = [QwenImageInpaintInputStep, QwenImageImg2ImgInputStep, QwenImageTextInputsStep] + block_names = ["inpaint", "img2img", "text2image"] + block_trigger_inputs = ["processed_mask_image", "image_latents", None] + + @property + def description(self): + return ( + "Input step that standardize the inputs for the denoising step, e.g. make sure inputs have consistent batch size, and patchified. \n" + " This is an auto pipeline block that works for text2image/inpaint/img2img tasks.\n" + + " - `QwenImageInpaintInputStep` (inpaint) is used when `processed_mask_image` is provided.\n" + + " - `QwenImageImg2ImgInputStep` (img2img) is used when `image_latents` is provided.\n" + + " - `QwenImageTextInputsStep` (text2image) is used when both `processed_mask_image` and `image_latents` are not provided.\n" + ) + + +# controlnet +class QwenImageOptionalControlNetInputStep(AutoPipelineBlocks): + block_classes = [QwenImageControlNetInputsStep] + block_names = ["controlnet"] + block_trigger_inputs = ["control_image_latents"] + + @property + def description(self): + return ( + "Controlnet input step that prepare the control_image_latents input.\n" + + "This is an auto pipeline block.\n" + + " - `QwenImageControlNetInputsStep` (controlnet) is used when `control_image_latents` is provided.\n" + + " - if `control_image_latents` is not provided, step will be skipped." + ) + + +## 1.7 QwenImage/auto before denoise step +# compose the steps into a BeforeDenoiseStep for text2image/img2img/inpaint tasks before combine into an auto step + +# QwenImage/text2image before denoise +QwenImageText2ImageBeforeDenoiseBlocks = InsertableDict( + [ + ("prepare_latents", QwenImagePrepareLatentsStep()), + ("set_timesteps", QwenImageSetTimestepsStep()), + ("prepare_rope_inputs", QwenImageRoPEInputsStep()), + ] +) + + +class QwenImageText2ImageBeforeDenoiseStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageText2ImageBeforeDenoiseBlocks.values() + block_names = QwenImageText2ImageBeforeDenoiseBlocks.keys() + + @property + def description(self): + return "Before denoise step that prepare the inputs (timesteps, latents, rope inputs etc.) for the denoise step for text2image task." + + +# QwenImage/inpaint before denoise +QwenImageInpaintBeforeDenoiseBlocks = InsertableDict( + [ + ("prepare_latents", QwenImagePrepareLatentsStep()), + ("set_timesteps", QwenImageSetTimestepsWithStrengthStep()), + ("prepare_inpaint_latents", QwenImageInpaintPrepareLatentsStep()), + ("prepare_rope_inputs", QwenImageRoPEInputsStep()), + ] +) + + +class QwenImageInpaintBeforeDenoiseStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageInpaintBeforeDenoiseBlocks.values() + block_names = QwenImageInpaintBeforeDenoiseBlocks.keys() + + @property + def description(self): + return "Before denoise step that prepare the inputs (timesteps, latents, rope inputs etc.) for the denoise step for inpaint task." 
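The auto steps above choose a sub-block based on which inputs are present at runtime; the mapping is exposed on the class itself, so it can be inspected directly (illustrative snippet):

```python
# Each AutoPipelineBlocks subclass pairs its sub-blocks with trigger inputs: the
# first trigger present in the pipeline state selects the branch, and a trailing
# None marks the default branch.
auto_input = QwenImageAutoInputStep()
for name, trigger in zip(auto_input.block_names, auto_input.block_trigger_inputs):
    print(f"{name:12s} <- triggered by {trigger!r}")
# inpaint      <- triggered by 'processed_mask_image'
# img2img      <- triggered by 'image_latents'
# text2image   <- triggered by None
```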
+ + +# QwenImage/img2img before denoise +QwenImageImg2ImgBeforeDenoiseBlocks = InsertableDict( + [ + ("prepare_latents", QwenImagePrepareLatentsStep()), + ("set_timesteps", QwenImageSetTimestepsWithStrengthStep()), + ("prepare_img2img_latents", QwenImagePrepareLatentsWithStrengthStep()), + ("prepare_rope_inputs", QwenImageRoPEInputsStep()), + ] +) + + +class QwenImageImg2ImgBeforeDenoiseStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageImg2ImgBeforeDenoiseBlocks.values() + block_names = QwenImageImg2ImgBeforeDenoiseBlocks.keys() + + @property + def description(self): + return "Before denoise step that prepare the inputs (timesteps, latents, rope inputs etc.) for the denoise step for img2img task." + + +# auto before_denoise step for text2image, inpaint, img2img tasks +class QwenImageAutoBeforeDenoiseStep(AutoPipelineBlocks): + block_classes = [ + QwenImageInpaintBeforeDenoiseStep, + QwenImageImg2ImgBeforeDenoiseStep, + QwenImageText2ImageBeforeDenoiseStep, + ] + block_names = ["inpaint", "img2img", "text2image"] + block_trigger_inputs = ["processed_mask_image", "image_latents", None] + + @property + def description(self): + return ( + "Before denoise step that prepare the inputs (timesteps, latents, rope inputs etc.) for the denoise step.\n" + + "This is an auto pipeline block that works for text2img, inpainting, img2img tasks.\n" + + " - `QwenImageInpaintBeforeDenoiseStep` (inpaint) is used when `processed_mask_image` is provided.\n" + + " - `QwenImageImg2ImgBeforeDenoiseStep` (img2img) is used when `image_latents` is provided.\n" + + " - `QwenImageText2ImageBeforeDenoiseStep` (text2image) is used when both `processed_mask_image` and `image_latents` are not provided.\n" + ) + + +# auto before_denoise step for controlnet tasks +class QwenImageOptionalControlNetBeforeDenoiseStep(AutoPipelineBlocks): + block_classes = [QwenImageControlNetBeforeDenoiserStep] + block_names = ["controlnet"] + block_trigger_inputs = ["control_image_latents"] + + @property + def description(self): + return ( + "Controlnet before denoise step that prepare the controlnet input.\n" + + "This is an auto pipeline block.\n" + + " - `QwenImageControlNetBeforeDenoiserStep` (controlnet) is used when `control_image_latents` is provided.\n" + + " - if `control_image_latents` is not provided, step will be skipped." + ) + + +## 1.8 QwenImage/auto denoise + + +# auto denoise step for controlnet tasks: works for all tasks with controlnet +class QwenImageControlNetAutoDenoiseStep(AutoPipelineBlocks): + block_classes = [QwenImageInpaintControlNetDenoiseStep, QwenImageControlNetDenoiseStep] + block_names = ["inpaint_denoise", "denoise"] + block_trigger_inputs = ["mask", None] + + @property + def description(self): + return ( + "Controlnet step during the denoising process. 
\n" + " This is an auto pipeline block that works for inpaint and text2image/img2img tasks with controlnet.\n" + + " - `QwenImageInpaintControlNetDenoiseStep` (inpaint) is used when `mask` is provided.\n" + + " - `QwenImageControlNetDenoiseStep` (text2image/img2img) is used when `mask` is not provided.\n" + ) + + +# auto denoise step for everything: works for all tasks with or without controlnet +class QwenImageAutoDenoiseStep(AutoPipelineBlocks): + block_classes = [ + QwenImageControlNetAutoDenoiseStep, + QwenImageInpaintDenoiseStep, + QwenImageDenoiseStep, + ] + block_names = ["controlnet_denoise", "inpaint_denoise", "denoise"] + block_trigger_inputs = ["control_image_latents", "mask", None] + + @property + def description(self): + return ( + "Denoise step that iteratively denoise the latents. \n" + " This is an auto pipeline block that works for inpaint/text2image/img2img tasks. It also works with controlnet\n" + + " - `QwenImageControlNetAutoDenoiseStep` (controlnet) is used when `control_image_latents` is provided.\n" + + " - `QwenImageInpaintDenoiseStep` (inpaint) is used when `mask` is provided and `control_image_latents` is not provided.\n" + + " - `QwenImageDenoiseStep` (text2image/img2img) is used when `mask` is not provided and `control_image_latents` is not provided.\n" + ) + + +## 1.9 QwenImage/auto decode +# auto decode step for inpaint and text2image tasks + + +class QwenImageAutoDecodeStep(AutoPipelineBlocks): + block_classes = [QwenImageInpaintDecodeStep, QwenImageDecodeStep] + block_names = ["inpaint_decode", "decode"] + block_trigger_inputs = ["mask", None] + + @property + def description(self): + return ( + "Decode step that decode the latents into images. \n" + " This is an auto pipeline block that works for inpaint/text2image/img2img tasks, for both QwenImage and QwenImage-Edit.\n" + + " - `QwenImageInpaintDecodeStep` (inpaint) is used when `mask` is provided.\n" + + " - `QwenImageDecodeStep` (text2image/img2img) is used when `mask` is not provided.\n" + ) + + +## 1.10 QwenImage/auto block & presets +AUTO_BLOCKS = InsertableDict( + [ + ("text_encoder", QwenImageTextEncoderStep()), + ("vae_encoder", QwenImageAutoVaeEncoderStep()), + ("controlnet_vae_encoder", QwenImageOptionalControlNetVaeEncoderStep()), + ("input", QwenImageAutoInputStep()), + ("controlnet_input", QwenImageOptionalControlNetInputStep()), + ("before_denoise", QwenImageAutoBeforeDenoiseStep()), + ("controlnet_before_denoise", QwenImageOptionalControlNetBeforeDenoiseStep()), + ("denoise", QwenImageAutoDenoiseStep()), + ("decode", QwenImageAutoDecodeStep()), + ] +) + + +class QwenImageAutoBlocks(SequentialPipelineBlocks): + model_name = "qwenimage" + + block_classes = AUTO_BLOCKS.values() + block_names = AUTO_BLOCKS.keys() + + @property + def description(self): + return ( + "Auto Modular pipeline for text-to-image, image-to-image, inpainting, and controlnet tasks using QwenImage.\n" + + "- for image-to-image generation, you need to provide `image`\n" + + "- for inpainting, you need to provide `mask_image` and `image`, optionally you can provide `padding_mask_crop` \n" + + "- to run the controlnet workflow, you need to provide `control_image`\n" + + "- for text-to-image generation, all you need to provide is `prompt`" + ) + + +# 2. 
QwenImage-Edit + +## 2.1 QwenImage-Edit/edit + +#### QwenImage-Edit/edit vl encoder: take both image and text prompts +QwenImageEditVLEncoderBlocks = InsertableDict( + [ + ("resize", QwenImageEditResizeDynamicStep()), + ("encode", QwenImageEditTextEncoderStep()), + ] +) + + +class QwenImageEditVLEncoderStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageEditVLEncoderBlocks.values() + block_names = QwenImageEditVLEncoderBlocks.keys() + + @property + def description(self) -> str: + return "QwenImage-Edit VL encoder step that encode the image an text prompts together." + + +#### QwenImage-Edit/edit vae encoder +QwenImageEditVaeEncoderBlocks = InsertableDict( + [ + ("resize", QwenImageEditResizeDynamicStep()), # edit has a different resize step + ("preprocess", QwenImageProcessImagesInputStep()), # resized_image -> processed_image + ("encode", QwenImageVaeEncoderDynamicStep()), # processed_image -> image_latents + ] +) + + +class QwenImageEditVaeEncoderStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageEditVaeEncoderBlocks.values() + block_names = QwenImageEditVaeEncoderBlocks.keys() + + @property + def description(self) -> str: + return "Vae encoder step that encode the image inputs into their latent representations." + + +#### QwenImage-Edit/edit input +QwenImageEditInputBlocks = InsertableDict( + [ + ("text_inputs", QwenImageTextInputsStep()), # default step to process text embeddings + ("additional_inputs", QwenImageInputsDynamicStep(image_latent_inputs=["image_latents"])), + ] +) + + +class QwenImageEditInputStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageEditInputBlocks.values() + block_names = QwenImageEditInputBlocks.keys() + + @property + def description(self): + return "Input step that prepares the inputs for the edit denoising step. It:\n" + " - make sure the text embeddings have consistent batch size as well as the additional inputs: \n" + " - `image_latents`.\n" + " - update height/width based `image_latents`, patchify `image_latents`." + + +#### QwenImage/edit presets +EDIT_BLOCKS = InsertableDict( + [ + ("text_encoder", QwenImageEditVLEncoderStep()), + ("vae_encoder", QwenImageEditVaeEncoderStep()), + ("input", QwenImageEditInputStep()), + ("prepare_latents", QwenImagePrepareLatentsStep()), + ("set_timesteps", QwenImageSetTimestepsStep()), + ("prepare_rope_inputs", QwenImageEditRoPEInputsStep()), + ("denoise", QwenImageEditDenoiseStep()), + ("decode", QwenImageDecodeStep()), + ] +) + + +## 2.2 QwenImage-Edit/edit inpaint + +#### QwenImage-Edit/edit inpaint vae encoder: the difference from regular inpaint is the resize step +QwenImageEditInpaintVaeEncoderBlocks = InsertableDict( + [ + ("resize", QwenImageEditResizeDynamicStep()), # image -> resized_image + ( + "preprocess", + QwenImageInpaintProcessImagesInputStep, + ), # resized_image, mask_image -> processed_image, processed_mask_image, mask_overlay_kwargs + ( + "encode", + QwenImageVaeEncoderDynamicStep(input_name="processed_image", output_name="image_latents"), + ), # processed_image -> image_latents + ] +) + + +class QwenImageEditInpaintVaeEncoderStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageEditInpaintVaeEncoderBlocks.values() + block_names = QwenImageEditInpaintVaeEncoderBlocks.keys() + + @property + def description(self) -> str: + return ( + "This step is used for processing image and mask inputs for QwenImage-Edit inpaint tasks. 
It:\n" + " - resize the image for target area (1024 * 1024) while maintaining the aspect ratio.\n" + " - process the resized image and mask image.\n" + " - create image latents." + ) + + +#### QwenImage-Edit/edit inpaint presets +EDIT_INPAINT_BLOCKS = InsertableDict( + [ + ("text_encoder", QwenImageEditVLEncoderStep()), + ("vae_encoder", QwenImageEditInpaintVaeEncoderStep()), + ("input", QwenImageInpaintInputStep()), + ("prepare_latents", QwenImagePrepareLatentsStep()), + ("set_timesteps", QwenImageSetTimestepsWithStrengthStep()), + ("prepare_inpaint_latents", QwenImageInpaintPrepareLatentsStep()), + ("prepare_rope_inputs", QwenImageEditRoPEInputsStep()), + ("denoise", QwenImageEditInpaintDenoiseStep()), + ("decode", QwenImageInpaintDecodeStep()), + ] +) + + +## 2.3 QwenImage-Edit/auto encoders + + +class QwenImageEditAutoVaeEncoderStep(AutoPipelineBlocks): + block_classes = [ + QwenImageEditInpaintVaeEncoderStep, + QwenImageEditVaeEncoderStep, + ] + block_names = ["edit_inpaint", "edit"] + block_trigger_inputs = ["mask_image", "image"] + + @property + def description(self): + return ( + "Vae encoder step that encode the image inputs into their latent representations. \n" + " This is an auto pipeline block that works for edit and edit_inpaint tasks.\n" + + " - `QwenImageEditInpaintVaeEncoderStep` (edit_inpaint) is used when `mask_image` is provided.\n" + + " - `QwenImageEditVaeEncoderStep` (edit) is used when `image` is provided.\n" + + " - if `mask_image` or `image` is not provided, step will be skipped." + ) + + +## 2.4 QwenImage-Edit/auto inputs +class QwenImageEditAutoInputStep(AutoPipelineBlocks): + block_classes = [QwenImageInpaintInputStep, QwenImageEditInputStep] + block_names = ["edit_inpaint", "edit"] + block_trigger_inputs = ["processed_mask_image", "image"] + + @property + def description(self): + return ( + "Input step that prepares the inputs for the edit denoising step.\n" + + " It is an auto pipeline block that works for edit and edit_inpaint tasks.\n" + + " - `QwenImageInpaintInputStep` (edit_inpaint) is used when `processed_mask_image` is provided.\n" + + " - `QwenImageEditInputStep` (edit) is used when `image_latents` is provided.\n" + + " - if `processed_mask_image` or `image_latents` is not provided, step will be skipped." + ) + + +## 2.5 QwenImage-Edit/auto before denoise +# compose the steps into a BeforeDenoiseStep for edit and edit_inpaint tasks before combine into an auto step + +#### QwenImage-Edit/edit before denoise +QwenImageEditBeforeDenoiseBlocks = InsertableDict( + [ + ("prepare_latents", QwenImagePrepareLatentsStep()), + ("set_timesteps", QwenImageSetTimestepsStep()), + ("prepare_rope_inputs", QwenImageEditRoPEInputsStep()), + ] +) + + +class QwenImageEditBeforeDenoiseStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageEditBeforeDenoiseBlocks.values() + block_names = QwenImageEditBeforeDenoiseBlocks.keys() + + @property + def description(self): + return "Before denoise step that prepare the inputs (timesteps, latents, rope inputs etc.) for the denoise step for edit task." 
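To summarize what the edit workflow above swaps in relative to the base QwenImage text2image preset, a small illustrative comparison (it assumes the preset `InsertableDict`s behave like ordinary ordered dicts, i.e. support `.items()` and `.get()`):

```python
# Step names are largely shared between the two presets, but the edit preset swaps
# in VL text encoding, an extra resize/VAE-encode stage, edit-specific RoPE inputs,
# and an edit-specific denoise step.
for name, block in EDIT_BLOCKS.items():
    base_block = TEXT2IMAGE_BLOCKS.get(name)
    if base_block is not None and type(base_block) is type(block):
        status = "same as text2image"
    else:
        status = "edit-specific"
    print(f"{name:22s} {type(block).__name__:35s} ({status})")
```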
+ + +#### QwenImage-Edit/edit inpaint before denoise +QwenImageEditInpaintBeforeDenoiseBlocks = InsertableDict( + [ + ("prepare_latents", QwenImagePrepareLatentsStep()), + ("set_timesteps", QwenImageSetTimestepsWithStrengthStep()), + ("prepare_inpaint_latents", QwenImageInpaintPrepareLatentsStep()), + ("prepare_rope_inputs", QwenImageEditRoPEInputsStep()), + ] +) + + +class QwenImageEditInpaintBeforeDenoiseStep(SequentialPipelineBlocks): + model_name = "qwenimage" + block_classes = QwenImageEditInpaintBeforeDenoiseBlocks.values() + block_names = QwenImageEditInpaintBeforeDenoiseBlocks.keys() + + @property + def description(self): + return "Before denoise step that prepare the inputs (timesteps, latents, rope inputs etc.) for the denoise step for edit inpaint task." + + +# auto before_denoise step for edit and edit_inpaint tasks +class QwenImageEditAutoBeforeDenoiseStep(AutoPipelineBlocks): + model_name = "qwenimage-edit" + block_classes = [ + QwenImageEditInpaintBeforeDenoiseStep, + QwenImageEditBeforeDenoiseStep, + ] + block_names = ["edit_inpaint", "edit"] + block_trigger_inputs = ["processed_mask_image", "image_latents"] + + @property + def description(self): + return ( + "Before denoise step that prepare the inputs (timesteps, latents, rope inputs etc.) for the denoise step.\n" + + "This is an auto pipeline block that works for edit (img2img) and edit inpaint tasks.\n" + + " - `QwenImageEditInpaintBeforeDenoiseStep` (edit_inpaint) is used when `processed_mask_image` is provided.\n" + + " - `QwenImageEditBeforeDenoiseStep` (edit) is used when `image_latents` is provided and `processed_mask_image` is not provided.\n" + + " - if `image_latents` or `processed_mask_image` is not provided, step will be skipped." + ) + + +## 2.6 QwenImage-Edit/auto denoise + + +class QwenImageEditAutoDenoiseStep(AutoPipelineBlocks): + model_name = "qwenimage-edit" + + block_classes = [QwenImageEditInpaintDenoiseStep, QwenImageEditDenoiseStep] + block_names = ["inpaint_denoise", "denoise"] + block_trigger_inputs = ["processed_mask_image", "image_latents"] + + @property + def description(self): + return ( + "Denoise step that iteratively denoise the latents. \n" + + "This block supports edit (img2img) and edit inpaint tasks for QwenImage Edit. \n" + + " - `QwenImageEditInpaintDenoiseStep` (inpaint) is used when `processed_mask_image` is provided.\n" + + " - `QwenImageEditDenoiseStep` (img2img) is used when `image_latents` is provided.\n" + + " - if `processed_mask_image` or `image_latents` is not provided, step will be skipped." + ) + + +## 2.7 QwenImage-Edit/auto blocks & presets + +EDIT_AUTO_BLOCKS = InsertableDict( + [ + ("text_encoder", QwenImageEditVLEncoderStep()), + ("vae_encoder", QwenImageEditAutoVaeEncoderStep()), + ("input", QwenImageEditAutoInputStep()), + ("before_denoise", QwenImageEditAutoBeforeDenoiseStep()), + ("denoise", QwenImageEditAutoDenoiseStep()), + ("decode", QwenImageAutoDecodeStep()), + ] +) + + +class QwenImageEditAutoBlocks(SequentialPipelineBlocks): + model_name = "qwenimage-edit" + block_classes = EDIT_AUTO_BLOCKS.values() + block_names = EDIT_AUTO_BLOCKS.keys() + + @property + def description(self): + return ( + "Auto Modular pipeline for edit (img2img) and edit inpaint tasks using QwenImage-Edit.\n" + + "- for edit (img2img) generation, you need to provide `image`\n" + + "- for edit inpainting, you need to provide `mask_image` and `image`, optionally you can provide `padding_mask_crop` \n" + ) + + +# 3. 
all block presets supported in QwenImage & QwenImage-Edit + + +ALL_BLOCKS = { + "text2image": TEXT2IMAGE_BLOCKS, + "img2img": IMAGE2IMAGE_BLOCKS, + "edit": EDIT_BLOCKS, + "edit_inpaint": EDIT_INPAINT_BLOCKS, + "inpaint": INPAINT_BLOCKS, + "controlnet": CONTROLNET_BLOCKS, + "auto": AUTO_BLOCKS, + "edit_auto": EDIT_AUTO_BLOCKS, +} diff --git a/src/diffusers/modular_pipelines/qwenimage/modular_pipeline.py b/src/diffusers/modular_pipelines/qwenimage/modular_pipeline.py new file mode 100644 index 0000000000..fe9757f41b --- /dev/null +++ b/src/diffusers/modular_pipelines/qwenimage/modular_pipeline.py @@ -0,0 +1,202 @@ +# Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import QwenImageLoraLoaderMixin +from ..modular_pipeline import ModularPipeline + + +class QwenImagePachifier(ConfigMixin): + """ + A class to pack and unpack latents for QwenImage. + """ + + config_name = "config.json" + + @register_to_config + def __init__( + self, + patch_size: int = 2, + ): + super().__init__() + + def pack_latents(self, latents): + if latents.ndim != 4 and latents.ndim != 5: + raise ValueError(f"Latents must have 4 or 5 dimensions, but got {latents.ndim}") + + if latents.ndim == 4: + latents = latents.unsqueeze(2) + + batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width = latents.shape + patch_size = self.config.patch_size + + if latent_height % patch_size != 0 or latent_width % patch_size != 0: + raise ValueError( + f"Latent height and width must be divisible by {patch_size}, but got {latent_height} and {latent_width}" + ) + + latents = latents.view( + batch_size, + num_channels_latents, + latent_height // patch_size, + patch_size, + latent_width // patch_size, + patch_size, + ) + latents = latents.permute( + 0, 2, 4, 1, 3, 5 + ) # Batch_size, num_patches_height, num_patches_width, num_channels_latents, patch_size, patch_size + latents = latents.reshape( + batch_size, + (latent_height // patch_size) * (latent_width // patch_size), + num_channels_latents * patch_size * patch_size, + ) + + return latents + + def unpack_latents(self, latents, height, width, vae_scale_factor=8): + if latents.ndim != 3: + raise ValueError(f"Latents must have 3 dimensions, but got {latents.ndim}") + + batch_size, num_patches, channels = latents.shape + patch_size = self.config.patch_size + + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. 
+ height = patch_size * (int(height) // (vae_scale_factor * patch_size)) + width = patch_size * (int(width) // (vae_scale_factor * patch_size)) + + latents = latents.view( + batch_size, + height // patch_size, + width // patch_size, + channels // (patch_size * patch_size), + patch_size, + patch_size, + ) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (patch_size * patch_size), 1, height, width) + + return latents + + +class QwenImageModularPipeline(ModularPipeline, QwenImageLoraLoaderMixin): + """ + A ModularPipeline for QwenImage. + + + + This is an experimental feature and is likely to change in the future. + + + """ + + @property + def default_height(self): + return self.default_sample_size * self.vae_scale_factor + + @property + def default_width(self): + return self.default_sample_size * self.vae_scale_factor + + @property + def default_sample_size(self): + return 128 + + @property + def vae_scale_factor(self): + vae_scale_factor = 8 + if hasattr(self, "vae") and self.vae is not None: + vae_scale_factor = 2 ** len(self.vae.temperal_downsample) + return vae_scale_factor + + @property + def num_channels_latents(self): + num_channels_latents = 16 + if hasattr(self, "transformer") and self.transformer is not None: + num_channels_latents = self.transformer.config.in_channels // 4 + return num_channels_latents + + @property + def is_guidance_distilled(self): + is_guidance_distilled = False + if hasattr(self, "transformer") and self.transformer is not None: + is_guidance_distilled = self.transformer.config.guidance_embeds + return is_guidance_distilled + + @property + def requires_unconditional_embeds(self): + requires_unconditional_embeds = False + + if hasattr(self, "guider") and self.guider is not None: + requires_unconditional_embeds = self.guider._enabled and self.guider.num_conditions > 1 + + return requires_unconditional_embeds + + +class QwenImageEditModularPipeline(ModularPipeline, QwenImageLoraLoaderMixin): + """ + A ModularPipeline for QwenImage-Edit. + + + + This is an experimental feature and is likely to change in the future. + + + """ + + # YiYi TODO: qwen edit should not provide default height/width, should be derived from the resized input image (after adjustment) produced by the resize step. 
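+    # With the standard QwenImage VAE (three temporal downsample stages, so vae_scale_factor=8) and
+    # default_sample_size=128, the properties below resolve to a 1024x1024 default resolution.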
+ @property + def default_height(self): + return self.default_sample_size * self.vae_scale_factor + + @property + def default_width(self): + return self.default_sample_size * self.vae_scale_factor + + @property + def default_sample_size(self): + return 128 + + @property + def vae_scale_factor(self): + vae_scale_factor = 8 + if hasattr(self, "vae") and self.vae is not None: + vae_scale_factor = 2 ** len(self.vae.temperal_downsample) + return vae_scale_factor + + @property + def num_channels_latents(self): + num_channels_latents = 16 + if hasattr(self, "transformer") and self.transformer is not None: + num_channels_latents = self.transformer.config.in_channels // 4 + return num_channels_latents + + @property + def is_guidance_distilled(self): + is_guidance_distilled = False + if hasattr(self, "transformer") and self.transformer is not None: + is_guidance_distilled = self.transformer.config.guidance_embeds + return is_guidance_distilled + + @property + def requires_unconditional_embeds(self): + requires_unconditional_embeds = False + + if hasattr(self, "guider") and self.guider is not None: + requires_unconditional_embeds = self.guider._enabled and self.guider.num_conditions > 1 + + return requires_unconditional_embeds diff --git a/src/diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py b/src/diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py index 0ee37f5201..e84f5cad1a 100644 --- a/src/diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py +++ b/src/diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py @@ -76,6 +76,7 @@ class StableDiffusionXLModularPipeline( vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) return vae_scale_factor + # YiYi TODO: change to num_channels_latents @property def num_channels_unet(self): num_channels_unet = 4 diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index ebabf17995..880984eeb8 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -91,6 +91,14 @@ from .pag import ( StableDiffusionXLPAGPipeline, ) from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline +from .qwenimage import ( + QwenImageControlNetPipeline, + QwenImageEditInpaintPipeline, + QwenImageEditPipeline, + QwenImageImg2ImgPipeline, + QwenImageInpaintPipeline, + QwenImagePipeline, +) from .sana import SanaPipeline from .stable_cascade import StableCascadeCombinedPipeline, StableCascadeDecoderPipeline from .stable_diffusion import ( @@ -150,6 +158,8 @@ AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict( ("cogview3", CogView3PlusPipeline), ("cogview4", CogView4Pipeline), ("cogview4-control", CogView4ControlPipeline), + ("qwenimage", QwenImagePipeline), + ("qwenimage-controlnet", QwenImageControlNetPipeline), ] ) @@ -174,6 +184,8 @@ AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict( ("flux-controlnet", FluxControlNetImg2ImgPipeline), ("flux-control", FluxControlImg2ImgPipeline), ("flux-kontext", FluxKontextPipeline), + ("qwenimage", QwenImageImg2ImgPipeline), + ("qwenimage-edit", QwenImageEditPipeline), ] ) @@ -195,6 +207,8 @@ AUTO_INPAINT_PIPELINES_MAPPING = OrderedDict( ("flux-controlnet", FluxControlNetInpaintPipeline), ("flux-control", FluxControlInpaintPipeline), ("stable-diffusion-pag", StableDiffusionPAGInpaintPipeline), + ("qwenimage", QwenImageInpaintPipeline), + ("qwenimage-edit", QwenImageEditInpaintPipeline), ] ) diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py 
b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 91eefc5c10..cd4d965e57 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -32,6 +32,66 @@ class FluxModularPipeline(metaclass=DummyObject): requires_backends(cls, ["torch", "transformers"]) +class QwenImageAutoBlocks(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class QwenImageEditAutoBlocks(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class QwenImageEditModularPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class QwenImageModularPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class StableDiffusionXLAutoBlocks(metaclass=DummyObject): _backends = ["torch", "transformers"] From 4e36bb0d23a0450079560ac12d2858e2eb3f7e24 Mon Sep 17 00:00:00 2001 From: "Frank (Haofan) Wang" Date: Tue, 9 Sep 2025 08:59:26 +0800 Subject: [PATCH 53/74] Support ControlNet-Inpainting for Qwen-Image (#12301) * add qwen-image-cn-inpaint --------- Co-authored-by: github-actions[bot] Co-authored-by: yiyixuxu --- src/diffusers/__init__.py | 2 + src/diffusers/pipelines/__init__.py | 2 + src/diffusers/pipelines/qwenimage/__init__.py | 2 + .../pipeline_qwenimage_controlnet_inpaint.py | 941 ++++++++++++++++++ .../dummy_torch_and_transformers_objects.py | 15 + 5 files changed, 962 insertions(+) create mode 100644 src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet_inpaint.py diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 4c06440172..d96acc3818 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -510,6 +510,7 @@ else: "PixArtAlphaPipeline", "PixArtSigmaPAGPipeline", "PixArtSigmaPipeline", + "QwenImageControlNetInpaintPipeline", "QwenImageControlNetPipeline", "QwenImageEditInpaintPipeline", "QwenImageEditPipeline", @@ -1163,6 +1164,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: PixArtAlphaPipeline, PixArtSigmaPAGPipeline, PixArtSigmaPipeline, + QwenImageControlNetInpaintPipeline, QwenImageControlNetPipeline, QwenImageEditInpaintPipeline, QwenImageEditPipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 
25d5d213cf..8ed07a72e3 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -394,6 +394,7 @@ else: "QwenImageInpaintPipeline", "QwenImageEditPipeline", "QwenImageEditInpaintPipeline", + "QwenImageControlNetInpaintPipeline", "QwenImageControlNetPipeline", ] try: @@ -714,6 +715,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pia import PIAPipeline from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline from .qwenimage import ( + QwenImageControlNetInpaintPipeline, QwenImageControlNetPipeline, QwenImageEditInpaintPipeline, QwenImageEditPipeline, diff --git a/src/diffusers/pipelines/qwenimage/__init__.py b/src/diffusers/pipelines/qwenimage/__init__.py index ae5cf04dc5..36d92917fd 100644 --- a/src/diffusers/pipelines/qwenimage/__init__.py +++ b/src/diffusers/pipelines/qwenimage/__init__.py @@ -25,6 +25,7 @@ else: _import_structure["modeling_qwenimage"] = ["ReduxImageEncoder"] _import_structure["pipeline_qwenimage"] = ["QwenImagePipeline"] _import_structure["pipeline_qwenimage_controlnet"] = ["QwenImageControlNetPipeline"] + _import_structure["pipeline_qwenimage_controlnet_inpaint"] = ["QwenImageControlNetInpaintPipeline"] _import_structure["pipeline_qwenimage_edit"] = ["QwenImageEditPipeline"] _import_structure["pipeline_qwenimage_edit_inpaint"] = ["QwenImageEditInpaintPipeline"] _import_structure["pipeline_qwenimage_img2img"] = ["QwenImageImg2ImgPipeline"] @@ -39,6 +40,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: else: from .pipeline_qwenimage import QwenImagePipeline from .pipeline_qwenimage_controlnet import QwenImageControlNetPipeline + from .pipeline_qwenimage_controlnet_inpaint import QwenImageControlNetInpaintPipeline from .pipeline_qwenimage_edit import QwenImageEditPipeline from .pipeline_qwenimage_edit_inpaint import QwenImageEditInpaintPipeline from .pipeline_qwenimage_img2img import QwenImageImg2ImgPipeline diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet_inpaint.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet_inpaint.py new file mode 100644 index 0000000000..102a813ab5 --- /dev/null +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet_inpaint.py @@ -0,0 +1,941 @@ +# Copyright 2025 Qwen-Image Team, The InstantX Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
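+
+# The ControlNet conditioning used by this pipeline is assembled in `prepare_image_with_mask` below: the masked
+# image is VAE-encoded into 16 latent channels, the inverted mask is resized to the latent resolution and appended
+# as one extra channel, and the packed result is passed to `QwenImageControlNetModel` together with the text
+# embeddings at every denoising step.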
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import QwenImageLoraLoaderMixin +from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel +from ...models.controlnets.controlnet_qwenimage import QwenImageControlNetModel, QwenImageMultiControlNetModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import QwenImagePipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers.utils import load_image + >>> from diffusers import QwenImageControlNetModel, QwenImageControlNetInpaintPipeline + + >>> base_model_path = "Qwen/Qwen-Image" + >>> controlnet_model_path = "InstantX/Qwen-Image-ControlNet-Inpainting" + >>> controlnet = QwenImageControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch.bfloat16) + >>> pipe = QwenImageControlNetInpaintPipeline.from_pretrained( + ... base_model_path, controlnet=controlnet, torch_dtype=torch.bfloat16 + ... ).to("cuda") + >>> image = load_image( + ... "https://huggingface.co/InstantX/Qwen-Image-ControlNet-Inpainting/resolve/main/assets/images/image1.png" + ... ) + >>> mask_image = load_image( + ... "https://huggingface.co/InstantX/Qwen-Image-ControlNet-Inpainting/resolve/main/assets/masks/mask1.png" + ... ) + >>> prompt = "一辆绿色的出租车行驶在路上" + >>> result = pipe( + ... prompt=prompt, + ... control_image=image, + ... control_mask=mask_image, + ... controlnet_conditioning_scale=1.0, + ... width=mask_image.size[0], + ... height=mask_image.size[1], + ... true_cfg_scale=4.0, + ... 
).images[0]
+    >>> result.save("qwenimage_controlnet_inpaint.png")
+    ```
+"""
+
+
+# Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift
+def calculate_shift(
+    image_seq_len,
+    base_seq_len: int = 256,
+    max_seq_len: int = 4096,
+    base_shift: float = 0.5,
+    max_shift: float = 1.15,
+):
+    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+    b = base_shift - m * base_seq_len
+    mu = image_seq_len * m + b
+    return mu
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
+def retrieve_latents(
+    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+):
+    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
+        return encoder_output.latent_dist.sample(generator)
+    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+        return encoder_output.latent_dist.mode()
+    elif hasattr(encoder_output, "latents"):
+        return encoder_output.latents
+    else:
+        raise AttributeError("Could not access latents of provided encoder_output")
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    r"""
+    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+    Args:
+        scheduler (`SchedulerMixin`):
+            The scheduler to get timesteps from.
+        num_inference_steps (`int`):
+            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+            must be `None`.
+        device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+        timesteps (`List[int]`, *optional*):
+            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+            `num_inference_steps` and `sigmas` must be `None`.
+        sigmas (`List[float]`, *optional*):
+            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+            `num_inference_steps` and `timesteps` must be `None`.
+
+    Returns:
+        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+        second element is the number of inference steps.
+    """
+    if timesteps is not None and sigmas is not None:
+        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+    if timesteps is not None:
+        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accepts_timesteps:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" timestep schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    elif sigmas is not None:
+        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accept_sigmas:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" sigmas schedules.
Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class QwenImageControlNetInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): + r""" + The QwenImage pipeline for text-to-image generation. + + Args: + transformer ([`QwenImageTransformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`Qwen2.5-VL-7B-Instruct`]): + [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct), specifically the + [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) variant. + tokenizer (`QwenTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). + """ + + model_cpu_offload_seq = "text_encoder->transformer->vae" + _callback_tensor_inputs = ["latents", "prompt_embeds"] + + def __init__( + self, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKLQwenImage, + text_encoder: Qwen2_5_VLForConditionalGeneration, + tokenizer: Qwen2Tokenizer, + transformer: QwenImageTransformer2DModel, + controlnet: QwenImageControlNetModel, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + transformer=transformer, + scheduler=scheduler, + controlnet=controlnet, + ) + self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 + # QwenImage latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible + # by the patch size. 
So the vae scale factor is multiplied by the patch size to account for this + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) + + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor * 2, + do_resize=True, + do_convert_grayscale=True, + do_normalize=False, + do_binarize=True, + ) + + self.tokenizer_max_length = 1024 + self.prompt_template_encode = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" + self.prompt_template_encode_start_idx = 34 + self.default_sample_size = 128 + + # Coped from diffusers.pipelines.qwenimage.pipeline_qwenimage.extract_masked_hidden + def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor): + bool_mask = mask.bool() + valid_lengths = bool_mask.sum(dim=1) + selected = hidden_states[bool_mask] + split_result = torch.split(selected, valid_lengths.tolist(), dim=0) + + return split_result + + # Coped from diffusers.pipelines.qwenimage.pipeline_qwenimage.get_qwen_prompt_embeds + def _get_qwen_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + + template = self.prompt_template_encode + drop_idx = self.prompt_template_encode_start_idx + txt = [template.format(e) for e in prompt] + txt_tokens = self.tokenizer( + txt, max_length=self.tokenizer_max_length + drop_idx, padding=True, truncation=True, return_tensors="pt" + ).to(self.device) + encoder_hidden_states = self.text_encoder( + input_ids=txt_tokens.input_ids, + attention_mask=txt_tokens.attention_mask, + output_hidden_states=True, + ) + hidden_states = encoder_hidden_states.hidden_states[-1] + split_hidden_states = self._extract_masked_hidden(hidden_states, txt_tokens.attention_mask) + split_hidden_states = [e[drop_idx:] for e in split_hidden_states] + attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in split_hidden_states] + max_seq_len = max([e.size(0) for e in split_hidden_states]) + prompt_embeds = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states] + ) + encoder_attention_mask = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list] + ) + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + return prompt_embeds, encoder_attention_mask + + # Coped from diffusers.pipelines.qwenimage.pipeline_qwenimage.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_mask: Optional[torch.Tensor] = None, + max_sequence_length: int = 1024, + ): + r""" + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. 
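+            prompt_embeds_mask (`torch.Tensor`, *optional*):
+                Pre-generated attention mask for `prompt_embeds`. Has to be passed whenever `prompt_embeds` is
+                provided, and should come from the same text encoder that produced the embeddings.
+            max_sequence_length (`int`, defaults to 1024):
+                Maximum sequence length to use with the `prompt`.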
+ """ + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) if prompt_embeds is None else prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_embeds, prompt_embeds_mask = self._get_qwen_prompt_embeds(prompt, device) + + _, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_images_per_prompt, 1) + prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_images_per_prompt, seq_len) + + return prompt_embeds, prompt_embeds_mask + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_embeds_mask=None, + negative_prompt_embeds_mask=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: + logger.warning( + f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and prompt_embeds_mask is None: + raise ValueError( + "If `prompt_embeds` are provided, `prompt_embeds_mask` also have to be passed. Make sure to generate `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`." + ) + if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also have to be passed. Make sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate `negative_prompt_embeds`." 
+ ) + + if max_sequence_length is not None and max_sequence_length > 1024: + raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}") + + @staticmethod + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._pack_latents + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + + return latents + + @staticmethod + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._unpack_latents + def _unpack_latents(latents, height, width, vae_scale_factor): + batch_size, num_patches, channels = latents.shape + + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. + height = 2 * (int(height) // (vae_scale_factor * 2)) + width = 2 * (int(width) // (vae_scale_factor * 2)) + + latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (2 * 2), 1, height, width) + + return latents + + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_tiling() + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. + height = 2 * (int(height) // (self.vae_scale_factor * 2)) + width = 2 * (int(width) // (self.vae_scale_factor * 2)) + + shape = (batch_size, 1, num_channels_latents, height, width) + + if latents is not None: + return latents.to(device=device, dtype=dtype) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) + + return latents + + # Copied from diffusers.pipelines.controlnet_sd3.pipeline_stable_diffusion_3_controlnet.StableDiffusion3ControlNetPipeline.prepare_image + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + if isinstance(image, torch.Tensor): + pass + else: + image = self.image_processor.preprocess(image, height=height, width=width) + + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + def prepare_image_with_mask( + self, + image, + mask, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + if isinstance(image, torch.Tensor): + pass + else: + image = self.image_processor.preprocess(image, height=height, width=width) + + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + image = image.to(device=device, dtype=dtype) # (bsz, 3, height_ori, width_ori) + + # Prepare mask + if isinstance(mask, torch.Tensor): + pass + else: + mask = self.mask_processor.preprocess(mask, height=height, width=width) + mask = mask.repeat_interleave(repeat_by, dim=0) + mask = mask.to(device=device, dtype=dtype) # (bsz, 1, height_ori, width_ori) + + if image.ndim == 4: + image = image.unsqueeze(2) + + if mask.ndim == 4: + mask = mask.unsqueeze(2) + + # Get masked image + masked_image = image.clone() + masked_image[(mask > 0.5).repeat(1, 3, 1, 1, 1)] = -1 # (bsz, 3, 1, height_ori, width_ori) + + self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) + latents_mean = (torch.tensor(self.vae.config.latents_mean).view(1, self.vae.config.z_dim, 1, 1, 1)).to(device) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( + device + ) + + # Encode to latents + image_latents = self.vae.encode(masked_image.to(self.vae.dtype)).latent_dist.sample() + image_latents = (image_latents - latents_mean) * latents_std + image_latents = image_latents.to(dtype) # torch.Size([1, 16, 1, height_ori//8, width_ori//8]) + + mask = torch.nn.functional.interpolate( + mask, size=(image_latents.shape[-3], image_latents.shape[-2], image_latents.shape[-1]) + ) + mask = 1 - mask # torch.Size([1, 1, 1, height_ori//8, width_ori//8]) + + control_image = torch.cat( + [image_latents, mask], dim=1 + ) # torch.Size([1, 16+1, 1, height_ori//8, width_ori//8]) + + control_image = control_image.permute(0, 2, 1, 3, 4) # torch.Size([1, 1, 16+1, height_ori//8, width_ori//8]) + + # pack + control_image = self._pack_latents( + control_image, + batch_size=control_image.shape[0], + num_channels_latents=control_image.shape[2], + height=control_image.shape[3], + width=control_image.shape[4], + ) + + if do_classifier_free_guidance and not guess_mode: + control_image = torch.cat([control_image] * 
2) + + return control_image + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def attention_kwargs(self): + return self._attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def current_timestep(self): + return self._current_timestep + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + negative_prompt: Union[str, List[str]] = None, + true_cfg_scale: float = 4.0, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + sigmas: Optional[List[float]] = None, + guidance_scale: float = 1.0, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + control_image: PipelineImageInput = None, + control_mask: PipelineImageInput = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_mask: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is + not greater than `1`). + true_cfg_scale (`float`, *optional*, defaults to 1.0): + When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 3.5): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. + of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. 
Higher guidance scale encourages to generate images that are closely linked to + the text `prompt`, usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.qwenimage.QwenImagePipelineOutput`] instead of a plain tuple. + attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.qwenimage.QwenImagePipelineOutput`] or `tuple`: + [`~pipelines.qwenimage.QwenImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is a list with the generated images. 
+ """ + + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(control_image) if isinstance(self.controlnet, QwenImageMultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_embeds_mask=prompt_embeds_mask, + negative_prompt_embeds_mask=negative_prompt_embeds_mask, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._attention_kwargs = attention_kwargs + self._current_timestep = None + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + has_neg_prompt = negative_prompt is not None or ( + negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None + ) + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt + prompt_embeds, prompt_embeds_mask = self.encode_prompt( + prompt=prompt, + prompt_embeds=prompt_embeds, + prompt_embeds_mask=prompt_embeds_mask, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + if do_true_cfg: + negative_prompt_embeds, negative_prompt_embeds_mask = self.encode_prompt( + prompt=negative_prompt, + prompt_embeds=negative_prompt_embeds, + prompt_embeds_mask=negative_prompt_embeds_mask, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + # 3. Prepare control image + num_channels_latents = self.transformer.config.in_channels // 4 + if isinstance(self.controlnet, QwenImageControlNetModel): + control_image = self.prepare_image_with_mask( + image=control_image, + mask=control_mask, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=self.vae.dtype, + ) + + # 4. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + img_shapes = [(1, height // self.vae_scale_factor // 2, width // self.vae_scale_factor // 2)] * batch_size + + # 5. 
Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas + image_seq_len = latents.shape[1] + mu = calculate_shift( + image_seq_len, + self.scheduler.config.get("base_image_seq_len", 256), + self.scheduler.config.get("max_image_seq_len", 4096), + self.scheduler.config.get("base_shift", 0.5), + self.scheduler.config.get("max_shift", 1.15), + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + sigmas=sigmas, + mu=mu, + ) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(self.controlnet, QwenImageControlNetModel) else keeps) + + # handle guidance + if self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) + else: + guidance = None + + if self.attention_kwargs is None: + self._attention_kwargs = {} + + # 6. Denoising loop + self.scheduler.set_begin_index(0) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + self._current_timestep = t + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latents.shape[0]).to(latents.dtype) + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + # controlnet + controlnet_block_samples = self.controlnet( + hidden_states=latents, + controlnet_cond=control_image.to(dtype=latents.dtype, device=device), + conditioning_scale=cond_scale, + timestep=timestep / 1000, + encoder_hidden_states=prompt_embeds, + encoder_hidden_states_mask=prompt_embeds_mask, + img_shapes=img_shapes, + txt_seq_lens=prompt_embeds_mask.sum(dim=1).tolist(), + return_dict=False, + ) + + with self.transformer.cache_context("cond"): + noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + encoder_hidden_states=prompt_embeds, + encoder_hidden_states_mask=prompt_embeds_mask, + img_shapes=img_shapes, + txt_seq_lens=prompt_embeds_mask.sum(dim=1).tolist(), + controlnet_block_samples=controlnet_block_samples, + attention_kwargs=self.attention_kwargs, + return_dict=False, + )[0] + + if do_true_cfg: + with self.transformer.cache_context("uncond"): + neg_noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + encoder_hidden_states_mask=negative_prompt_embeds_mask, + encoder_hidden_states=negative_prompt_embeds, + img_shapes=img_shapes, + txt_seq_lens=negative_prompt_embeds_mask.sum(dim=1).tolist(), + controlnet_block_samples=controlnet_block_samples, + attention_kwargs=self.attention_kwargs, + return_dict=False, + )[0] + comb_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred) + + cond_norm = torch.norm(noise_pred, dim=-1, keepdim=True) + noise_norm = torch.norm(comb_pred, dim=-1, keepdim=True) + noise_pred = comb_pred 
* (cond_norm / noise_norm) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + self._current_timestep = None + if output_type == "latent": + image = latents + else: + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = latents.to(self.vae.dtype) + latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, self.vae.config.z_dim, 1, 1, 1) + .to(latents.device, latents.dtype) + ) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( + latents.device, latents.dtype + ) + latents = latents / latents_std + latents_mean + image = self.vae.decode(latents, return_dict=False)[0][:, :, 0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return QwenImagePipelineOutput(images=image) diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index cd4d965e57..00792fa55a 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -1817,6 +1817,21 @@ class PixArtSigmaPipeline(metaclass=DummyObject): requires_backends(cls, ["torch", "transformers"]) +class QwenImageControlNetInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class QwenImageControlNetPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] From c222570a9b47901266fecf34222f540870c3bb1b Mon Sep 17 00:00:00 2001 From: Leo Jiang Date: Tue, 9 Sep 2025 15:28:08 +0800 Subject: [PATCH 54/74] DeepSpeed adaption for flux-kontext (#12240) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: J石页 Co-authored-by: Sayak Paul --- .../train_dreambooth_lora_flux_kontext.py | 43 +++++++++++++------ 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/examples/dreambooth/train_dreambooth_lora_flux_kontext.py b/examples/dreambooth/train_dreambooth_lora_flux_kontext.py index 87e0d2c29e..03c05a05e0 100644 --- a/examples/dreambooth/train_dreambooth_lora_flux_kontext.py +++ b/examples/dreambooth/train_dreambooth_lora_flux_kontext.py @@ -29,8 +29,9 @@ from pathlib import Path import numpy 
as np import torch import transformers -from accelerate import Accelerator +from accelerate import Accelerator, DistributedType from accelerate.logging import get_logger +from accelerate.state import AcceleratorState from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from huggingface_hub.utils import insecure_hashlib @@ -1222,6 +1223,9 @@ def main(args): kwargs_handlers=[kwargs], ) + if accelerator.distributed_type == DistributedType.DEEPSPEED: + AcceleratorState().deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = args.train_batch_size + # Disable AMP for MPS. if torch.backends.mps.is_available(): accelerator.native_amp = False @@ -1438,17 +1442,20 @@ def main(args): text_encoder_one_lora_layers_to_save = None modules_to_save = {} for model in models: - if isinstance(model, type(unwrap_model(transformer))): + if isinstance(unwrap_model(model), type(unwrap_model(transformer))): + model = unwrap_model(model) transformer_lora_layers_to_save = get_peft_model_state_dict(model) modules_to_save["transformer"] = model - elif isinstance(model, type(unwrap_model(text_encoder_one))): + elif isinstance(unwrap_model(model), type(unwrap_model(text_encoder_one))): + model = unwrap_model(model) text_encoder_one_lora_layers_to_save = get_peft_model_state_dict(model) modules_to_save["text_encoder"] = model else: raise ValueError(f"unexpected save model: {model.__class__}") # make sure to pop weight so that corresponding model is not saved again - weights.pop() + if weights: + weights.pop() FluxKontextPipeline.save_lora_weights( output_dir, @@ -1461,15 +1468,25 @@ def main(args): transformer_ = None text_encoder_one_ = None - while len(models) > 0: - model = models.pop() + if not accelerator.distributed_type == DistributedType.DEEPSPEED: + while len(models) > 0: + model = models.pop() - if isinstance(model, type(unwrap_model(transformer))): - transformer_ = model - elif isinstance(model, type(unwrap_model(text_encoder_one))): - text_encoder_one_ = model - else: - raise ValueError(f"unexpected save model: {model.__class__}") + if isinstance(unwrap_model(model), type(unwrap_model(transformer))): + transformer_ = unwrap_model(model) + elif isinstance(unwrap_model(model), type(unwrap_model(text_encoder_one))): + text_encoder_one_ = unwrap_model(model) + else: + raise ValueError(f"unexpected save model: {model.__class__}") + + else: + transformer_ = FluxTransformer2DModel.from_pretrained( + args.pretrained_model_name_or_path, subfolder="transformer" + ) + transformer_.add_adapter(transformer_lora_config) + text_encoder_one_ = text_encoder_cls_one.from_pretrained( + args.pretrained_model_name_or_path, subfolder="text_encoder" + ) lora_state_dict = FluxKontextPipeline.lora_state_dict(input_dir) @@ -2069,7 +2086,7 @@ def main(args): progress_bar.update(1) global_step += 1 - if accelerator.is_main_process: + if accelerator.is_main_process or accelerator.distributed_type == DistributedType.DEEPSPEED: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: From 28106fcac4fd13e7ced5c9eb6803f107e804a08f Mon Sep 17 00:00:00 2001 From: calcuis <113646141+calcuis@users.noreply.github.com> Date: Tue, 9 Sep 2025 04:40:21 -0700 Subject: [PATCH 55/74] gguf new quant type support (with demo) (#12076) * Update utils.py not perfect but works engine: 
https://github.com/calcuis/gguf-connector/blob/main/src/gguf_connector/quant2c.py inference example(s): https://github.com/calcuis/gguf-connector/blob/main/src/gguf_connector/k6.py https://github.com/calcuis/gguf-connector/blob/main/src/gguf_connector/k5.py gguf file sample(s): https://huggingface.co/calcuis/kontext-gguf/tree/main https://huggingface.co/calcuis/krea-gguf/tree/main * Apply style fixes --------- Co-authored-by: github-actions[bot] --- src/diffusers/quantizers/gguf/utils.py | 56 ++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/src/diffusers/quantizers/gguf/utils.py b/src/diffusers/quantizers/gguf/utils.py index 3dd00b2ce3..2fba9986e8 100644 --- a/src/diffusers/quantizers/gguf/utils.py +++ b/src/diffusers/quantizers/gguf/utils.py @@ -429,8 +429,64 @@ def dequantize_blocks_BF16(blocks, block_size, type_size, dtype=None): return (blocks.view(torch.int16).to(torch.int32) << 16).view(torch.float32) +# this part from calcuis (gguf.org) +# more info: https://github.com/calcuis/gguf-connector/blob/main/src/gguf_connector/quant2c.py + + +def dequantize_blocks_IQ4_NL(blocks, block_size, type_size, dtype=None): + kvalues = torch.tensor( + [-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113], + dtype=torch.float32, + device=blocks.device, + ) + n_blocks = blocks.shape[0] + d, qs = split_block_dims(blocks, 2) + d = d.view(torch.float16).to(dtype) + qs = qs.reshape((n_blocks, -1, 1, block_size // 2)) >> torch.tensor( + [0, 4], device=blocks.device, dtype=torch.uint8 + ).reshape((1, 1, 2, 1)) + qs = (qs & 15).reshape((n_blocks, -1)).to(torch.int64) + kvalues = kvalues.view(1, 1, 16) + qs = qs.unsqueeze(-1) + qs = torch.gather(kvalues.expand(qs.shape[0], qs.shape[1], 16), 2, qs) + qs = qs.squeeze(-1).to(dtype) + return d * qs + + +def dequantize_blocks_IQ4_XS(blocks, block_size, type_size, dtype=None): + kvalues = torch.tensor( + [-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113], + dtype=torch.float32, + device=blocks.device, + ) + n_blocks = blocks.shape[0] + d, scales_h, scales_l, qs = split_block_dims(blocks, 2, 2, QK_K // 64) + d = d.view(torch.float16).to(dtype) + scales_h = scales_h.view(torch.int16) + scales_l = scales_l.reshape((n_blocks, -1, 1)) >> torch.tensor( + [0, 4], device=blocks.device, dtype=torch.uint8 + ).reshape((1, 1, 2)) + scales_h = scales_h.reshape((n_blocks, 1, -1)) >> torch.tensor( + [2 * i for i in range(QK_K // 32)], device=blocks.device, dtype=torch.uint8 + ).reshape((1, -1, 1)) + scales_l = scales_l.reshape((n_blocks, -1)) & 0x0F + scales_h = scales_h.reshape((n_blocks, -1)) & 0x03 + scales = (scales_l | (scales_h << 4)) - 32 + dl = (d * scales.to(dtype)).reshape((n_blocks, -1, 1)) + shifts_q = torch.tensor([0, 4], device=blocks.device, dtype=torch.uint8).reshape(1, 1, 2, 1) + qs = qs.reshape((n_blocks, -1, 1, 16)) >> shifts_q + qs = (qs & 15).reshape((n_blocks, -1, 32)).to(torch.int64) + kvalues = kvalues.view(1, 1, 1, 16) + qs = qs.unsqueeze(-1) + qs = torch.gather(kvalues.expand(qs.shape[0], qs.shape[1], qs.shape[2], 16), 3, qs) + qs = qs.squeeze(-1).to(dtype) + return (dl * qs).reshape(n_blocks, -1) + + GGML_QUANT_SIZES = gguf.GGML_QUANT_SIZES dequantize_functions = { + gguf.GGMLQuantizationType.IQ4_NL: dequantize_blocks_IQ4_NL, + gguf.GGMLQuantizationType.IQ4_XS: dequantize_blocks_IQ4_XS, gguf.GGMLQuantizationType.BF16: dequantize_blocks_BF16, gguf.GGMLQuantizationType.Q8_0: dequantize_blocks_Q8_0, gguf.GGMLQuantizationType.Q5_1: dequantize_blocks_Q5_1, From 
4067d6c4b64f2b606f9806d4a8b15d5fd5cbea1e Mon Sep 17 00:00:00 2001 From: kaixuanliu Date: Wed, 10 Sep 2025 05:36:03 +0800 Subject: [PATCH 56/74] adjust criteria for marigold-intrinsics example on XPU (#12290) adjust criteria for XPU Signed-off-by: Liu, Kaixuan Co-authored-by: Aryan --- .../marigold/test_marigold_intrinsics.py | 67 ++++++++++++++++++- 1 file changed, 64 insertions(+), 3 deletions(-) diff --git a/tests/pipelines/marigold/test_marigold_intrinsics.py b/tests/pipelines/marigold/test_marigold_intrinsics.py index 3f7ab9bf6e..7db14b67ce 100644 --- a/tests/pipelines/marigold/test_marigold_intrinsics.py +++ b/tests/pipelines/marigold/test_marigold_intrinsics.py @@ -34,6 +34,7 @@ from diffusers import ( ) from ...testing_utils import ( + Expectations, backend_empty_cache, enable_full_determinism, floats_tensor, @@ -416,7 +417,7 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase): expected_slice: np.ndarray = None, model_id: str = "prs-eth/marigold-iid-appearance-v1-1", image_url: str = "https://marigoldmonodepth.github.io/images/einstein.jpg", - atol: float = 1e-4, + atol: float = 1e-3, **pipe_kwargs, ): from_pretrained_kwargs = {} @@ -531,11 +532,41 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase): ) def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E3_B1_M1(self): + expected_slices = Expectations( + { + ("xpu", 3): np.array( + [ + 0.62655, + 0.62477, + 0.62161, + 0.62452, + 0.62454, + 0.62454, + 0.62255, + 0.62647, + 0.63379, + ] + ), + ("cuda", 7): np.array( + [ + 0.61572, + 0.61377, + 0.61182, + 0.61426, + 0.61377, + 0.61426, + 0.61279, + 0.61572, + 0.62354, + ] + ), + } + ) self._test_marigold_intrinsics( is_fp16=True, device=torch_device, generator_seed=0, - expected_slice=np.array([0.61572, 0.61377, 0.61182, 0.61426, 0.61377, 0.61426, 0.61279, 0.61572, 0.62354]), + expected_slice=expected_slices.get_expectation(), num_inference_steps=1, processing_resolution=768, ensemble_size=3, @@ -545,11 +576,41 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase): ) def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E4_B2_M1(self): + expected_slices = Expectations( + { + ("xpu", 3): np.array( + [ + 0.62988, + 0.62792, + 0.62548, + 0.62841, + 0.62792, + 0.62792, + 0.62646, + 0.62939, + 0.63721, + ] + ), + ("cuda", 7): np.array( + [ + 0.61914, + 0.6167, + 0.61475, + 0.61719, + 0.61719, + 0.61768, + 0.61572, + 0.61914, + 0.62695, + ] + ), + } + ) self._test_marigold_intrinsics( is_fp16=True, device=torch_device, generator_seed=0, - expected_slice=np.array([0.61914, 0.6167, 0.61475, 0.61719, 0.61719, 0.61768, 0.61572, 0.61914, 0.62695]), + expected_slice=expected_slices.get_expectation(), num_inference_steps=1, processing_resolution=768, ensemble_size=4, From 43459079ab06d4f94f2d93ba7153e2c5310928d3 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 10 Sep 2025 09:09:57 +0530 Subject: [PATCH 57/74] [core] feat: support group offloading at the pipeline level (#12283) * feat: support group offloading at the pipeline level.
* add tests * up * [docs] Pipeline group offloading (#12286) init Co-authored-by: Sayak Paul --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- docs/source/en/optimization/memory.md | 49 ++++++++- src/diffusers/pipelines/pipeline_utils.py | 127 ++++++++++++++++++++++ tests/pipelines/test_pipelines_common.py | 68 ++++++++++++ 3 files changed, 241 insertions(+), 3 deletions(-) diff --git a/docs/source/en/optimization/memory.md b/docs/source/en/optimization/memory.md index 78fd96e027..611e07ec76 100644 --- a/docs/source/en/optimization/memory.md +++ b/docs/source/en/optimization/memory.md @@ -291,13 +291,53 @@ Group offloading moves groups of internal layers ([torch.nn.ModuleList](https:// > [!WARNING] > Group offloading may not work with all models if the forward implementation contains weight-dependent device casting of inputs because it may clash with group offloading's device casting mechanism. -Call [`~ModelMixin.enable_group_offload`] to enable it for standard Diffusers model components that inherit from [`ModelMixin`]. For other model components that don't inherit from [`ModelMixin`], such as a generic [torch.nn.Module](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), use [`~hooks.apply_group_offloading`] instead. - -The `offload_type` parameter can be set to `block_level` or `leaf_level`. +Enable group offloading by configuring the `offload_type` parameter to `block_level` or `leaf_level`. - `block_level` offloads groups of layers based on the `num_blocks_per_group` parameter. For example, if `num_blocks_per_group=2` on a model with 40 layers, 2 layers are onloaded and offloaded at a time (20 total onloads/offloads). This drastically reduces memory requirements. - `leaf_level` offloads individual layers at the lowest level and is equivalent to [CPU offloading](#cpu-offloading). But it can be made faster if you use streams without giving up inference speed. +Group offloading is supported for entire pipelines or individual models. Applying group offloading to the entire pipeline is the easiest option while selectively applying it to individual models gives users more flexibility to use different offloading techniques for different models. + + + + +Call [`~DiffusionPipeline.enable_group_offload`] on a pipeline. + +```py +import torch +from diffusers import CogVideoXPipeline +from diffusers.hooks import apply_group_offloading +from diffusers.utils import export_to_video + +onload_device = torch.device("cuda") +offload_device = torch.device("cpu") + +pipeline = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16) +pipeline.enable_group_offload( + onload_device=onload_device, + offload_device=offload_device, + offload_type="leaf_level", + use_stream=True +) + +prompt = ( + "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. " + "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other " + "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, " + "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. " + "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical " + "atmosphere of this unique musical performance." 
+) +video = pipeline(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0] +print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") +export_to_video(video, "output.mp4", fps=8) +``` + + + + +Call [`~ModelMixin.enable_group_offload`] on standard Diffusers model components that inherit from [`ModelMixin`]. For other model components that don't inherit from [`ModelMixin`], such as a generic [torch.nn.Module](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), use [`~hooks.apply_group_offloading`] instead. + ```py import torch from diffusers import CogVideoXPipeline @@ -328,6 +368,9 @@ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} G export_to_video(video, "output.mp4", fps=8) ``` + + + #### CUDA stream The `use_stream` parameter can be activated for CUDA devices that support asynchronous data transfer streams to reduce overall execution time compared to [CPU offloading](#cpu-offloading). It overlaps data transfer and computation by using layer prefetching. The next layer to be executed is loaded onto the GPU while the current layer is still being executed. It can increase CPU memory significantly so ensure you have 2x the amount of memory as the model size. diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 023feae4dd..0116ad917c 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -1334,6 +1334,133 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin): offload_buffers = len(model._parameters) > 0 cpu_offload(model, device, offload_buffers=offload_buffers) + def enable_group_offload( + self, + onload_device: torch.device, + offload_device: torch.device = torch.device("cpu"), + offload_type: str = "block_level", + num_blocks_per_group: Optional[int] = None, + non_blocking: bool = False, + use_stream: bool = False, + record_stream: bool = False, + low_cpu_mem_usage=False, + offload_to_disk_path: Optional[str] = None, + exclude_modules: Optional[Union[str, List[str]]] = None, + ) -> None: + r""" + Applies group offloading to the internal layers of a torch.nn.Module. To understand what group offloading is, + and where it is beneficial, we need to first provide some context on how other supported offloading methods + work. + + Typically, offloading is done at two levels: + - Module-level: In Diffusers, this can be enabled using the `ModelMixin::enable_model_cpu_offload()` method. It + works by offloading each component of a pipeline to the CPU for storage, and onloading to the accelerator + device when needed for computation. This method is more memory-efficient than keeping all components on the + accelerator, but the memory requirements are still quite high. For this method to work, one needs memory + equivalent to size of the model in runtime dtype + size of largest intermediate activation tensors to be able + to complete the forward pass. + - Leaf-level: In Diffusers, this can be enabled using the `ModelMixin::enable_sequential_cpu_offload()` method. + It + works by offloading the lowest leaf-level parameters of the computation graph to the CPU for storage, and + onloading only the leafs to the accelerator device for computation. This uses the lowest amount of accelerator + memory, but can be slower due to the excessive number of device synchronizations. + + Group offloading is a middle ground between the two methods. 
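For orientation, the three offloading granularities discussed above map onto three different pipeline-level calls; a minimal sketch (the checkpoint id is borrowed from the example further down in this docstring, and the exact settings are illustrative only):

```py
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16)

# module-level offloading: whole components move between CPU and accelerator
# pipe.enable_model_cpu_offload()

# leaf-level offloading: individual leaf parameters move; lowest memory, most device syncs
# pipe.enable_sequential_cpu_offload()

# group offloading (added in this patch): groups of layers move, a middle ground
pipe.enable_group_offload(
    onload_device=torch.device("cuda"),
    offload_device=torch.device("cpu"),
    offload_type="block_level",
    num_blocks_per_group=2,
)
```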
It works by offloading groups of internal layers, + (either `torch.nn.ModuleList` or `torch.nn.Sequential`). This method uses lower memory than module-level + offloading. It is also faster than leaf-level/sequential offloading, as the number of device synchronizations + is reduced. + + Another supported feature (for CUDA devices with support for asynchronous data transfer streams) is the ability + to overlap data transfer and computation to reduce the overall execution time compared to sequential + offloading. This is enabled using layer prefetching with streams, i.e., the layer that is to be executed next + starts onloading to the accelerator device while the current layer is being executed - this increases the + memory requirements slightly. Note that this implementation also supports leaf-level offloading but can be made + much faster when using streams. + + Args: + onload_device (`torch.device`): + The device to which the group of modules are onloaded. + offload_device (`torch.device`, defaults to `torch.device("cpu")`): + The device to which the group of modules are offloaded. This should typically be the CPU. Default is + CPU. + offload_type (`str` or `GroupOffloadingType`, defaults to "block_level"): + The type of offloading to be applied. Can be one of "block_level" or "leaf_level". Default is + "block_level". + offload_to_disk_path (`str`, *optional*, defaults to `None`): + The path to the directory where parameters will be offloaded. Setting this option can be useful in + limited RAM environment settings where a reasonable speed-memory trade-off is desired. + num_blocks_per_group (`int`, *optional*): + The number of blocks per group when using offload_type="block_level". This is required when using + offload_type="block_level". + non_blocking (`bool`, defaults to `False`): + If True, offloading and onloading is done with non-blocking data transfer. + use_stream (`bool`, defaults to `False`): + If True, offloading and onloading is done asynchronously using a CUDA stream. This can be useful for + overlapping computation and data transfer. + record_stream (`bool`, defaults to `False`): When enabled with `use_stream`, it marks the current tensor + as having been used by this stream. It is faster at the expense of slightly more memory usage. Refer to + the [PyTorch official docs](https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html) + more details. + low_cpu_mem_usage (`bool`, defaults to `False`): + If True, the CPU memory usage is minimized by pinning tensors on-the-fly instead of pre-pinning them. + This option only matters when using streamed CPU offloading (i.e. `use_stream=True`). This can be + useful when the CPU memory is a bottleneck but may counteract the benefits of using streams. + exclude_modules (`Union[str, List[str]]`, defaults to `None`): List of modules to exclude from offloading. + + Example: + ```python + >>> from diffusers import DiffusionPipeline + >>> import torch + + >>> pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16) + + >>> pipe.enable_group_offload( + ... onload_device=torch.device("cuda"), + ... offload_device=torch.device("cpu"), + ... offload_type="leaf_level", + ... use_stream=True, + ... 
) + >>> image = pipe("a beautiful sunset").images[0] + ``` + """ + from ..hooks import apply_group_offloading + + if isinstance(exclude_modules, str): + exclude_modules = [exclude_modules] + elif exclude_modules is None: + exclude_modules = [] + + unknown = set(exclude_modules) - self.components.keys() + if unknown: + logger.info( + f"The following modules are not present in pipeline: {', '.join(unknown)}. Ignore if this is expected." + ) + + group_offload_kwargs = { + "onload_device": onload_device, + "offload_device": offload_device, + "offload_type": offload_type, + "num_blocks_per_group": num_blocks_per_group, + "non_blocking": non_blocking, + "use_stream": use_stream, + "record_stream": record_stream, + "low_cpu_mem_usage": low_cpu_mem_usage, + "offload_to_disk_path": offload_to_disk_path, + } + for name, component in self.components.items(): + if name not in exclude_modules and isinstance(component, torch.nn.Module): + if hasattr(component, "enable_group_offload"): + component.enable_group_offload(**group_offload_kwargs) + else: + apply_group_offloading(module=component, **group_offload_kwargs) + + if exclude_modules: + for module_name in exclude_modules: + module = getattr(self, module_name, None) + if module is not None and isinstance(module, torch.nn.Module): + module.to(onload_device) + logger.debug(f"Placed `{module_name}` on {onload_device} device as it was in `exclude_modules`.") + def reset_device_map(self): r""" Resets the device maps (if any) to None. diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index dcef33897e..db8209835b 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -9,6 +9,7 @@ from typing import Any, Callable, Dict, Union import numpy as np import PIL.Image +import pytest import torch import torch.nn as nn from huggingface_hub import ModelCard, delete_repo @@ -2362,6 +2363,73 @@ class PipelineTesterMixin: max_diff = np.abs(to_np(out) - to_np(loaded_out)).max() self.assertLess(max_diff, expected_max_difference) + @require_torch_accelerator + def test_pipeline_level_group_offloading_sanity_checks(self): + components = self.get_dummy_components() + pipe: DiffusionPipeline = self.pipeline_class(**components) + + for name, component in pipe.components.items(): + if hasattr(component, "_supports_group_offloading"): + if not component._supports_group_offloading: + pytest.skip(f"{self.pipeline_class.__name__} is not suitable for this test.") + + module_names = sorted( + [name for name, component in pipe.components.items() if isinstance(component, torch.nn.Module)] + ) + exclude_module_name = module_names[0] + offload_device = "cpu" + pipe.enable_group_offload( + onload_device=torch_device, + offload_device=offload_device, + offload_type="leaf_level", + exclude_modules=exclude_module_name, + ) + excluded_module = getattr(pipe, exclude_module_name) + self.assertTrue(torch.device(excluded_module.device).type == torch.device(torch_device).type) + + for name, component in pipe.components.items(): + if name not in [exclude_module_name] and isinstance(component, torch.nn.Module): + # `component.device` prints the `onload_device` type. We should probably override the + # `device` property in `ModelMixin`. 
+ component_device = next(component.parameters())[0].device + self.assertTrue(torch.device(component_device).type == torch.device(offload_device).type) + + @require_torch_accelerator + def test_pipeline_level_group_offloading_inference(self, expected_max_difference=1e-4): + components = self.get_dummy_components() + pipe: DiffusionPipeline = self.pipeline_class(**components) + + for name, component in pipe.components.items(): + if hasattr(component, "_supports_group_offloading"): + if not component._supports_group_offloading: + pytest.skip(f"{self.pipeline_class.__name__} is not suitable for this test.") + + # Regular inference. + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + torch.manual_seed(0) + inputs = self.get_dummy_inputs(torch_device) + inputs["generator"] = torch.manual_seed(0) + out = pipe(**inputs)[0] + + pipe.to("cpu") + del pipe + + # Inference with offloading + pipe: DiffusionPipeline = self.pipeline_class(**components) + offload_device = "cpu" + pipe.enable_group_offload( + onload_device=torch_device, + offload_device=offload_device, + offload_type="leaf_level", + ) + pipe.set_progress_bar_config(disable=None) + inputs["generator"] = torch.manual_seed(0) + out_offload = pipe(**inputs)[0] + + max_diff = np.abs(to_np(out) - to_np(out_offload)).max() + self.assertLess(max_diff, expected_max_difference) + @is_staging_test class PipelinePushToHubTester(unittest.TestCase): From f7b79452b4f8693ed09e572f83909d95331ad884 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 10 Sep 2025 12:39:55 +0530 Subject: [PATCH 58/74] [modular] fix flux modular pipelines for t2i and i2i (#12272) fix flux modular pipelines for t2i and i2i --- src/diffusers/modular_pipelines/flux/before_denoise.py | 5 +++-- src/diffusers/modular_pipelines/flux/modular_blocks.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/diffusers/modular_pipelines/flux/before_denoise.py b/src/diffusers/modular_pipelines/flux/before_denoise.py index 507acce1eb..4272066309 100644 --- a/src/diffusers/modular_pipelines/flux/before_denoise.py +++ b/src/diffusers/modular_pipelines/flux/before_denoise.py @@ -454,6 +454,9 @@ class FluxImg2ImgSetTimestepsStep(ModularPipelineBlocks): block_state = self.get_block_state(state) block_state.device = components._execution_device + block_state.height = block_state.height or components.default_height + block_state.width = block_state.width or components.default_width + scheduler = components.scheduler transformer = components.transformer batch_size = block_state.batch_size * block_state.num_images_per_prompt @@ -659,8 +662,6 @@ class FluxImg2ImgPrepareLatentsStep(ModularPipelineBlocks): def __call__(self, components: FluxModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) - block_state.height = block_state.height or components.default_height - block_state.width = block_state.width or components.default_width block_state.device = components._execution_device block_state.dtype = torch.bfloat16 # TODO: okay to hardcode this? 
block_state.num_channels_latents = components.num_channels_latents diff --git a/src/diffusers/modular_pipelines/flux/modular_blocks.py b/src/diffusers/modular_pipelines/flux/modular_blocks.py index 04b439f026..37895bddbf 100644 --- a/src/diffusers/modular_pipelines/flux/modular_blocks.py +++ b/src/diffusers/modular_pipelines/flux/modular_blocks.py @@ -148,8 +148,8 @@ TEXT2IMAGE_BLOCKS = InsertableDict( [ ("text_encoder", FluxTextEncoderStep), ("input", FluxInputStep), - ("set_timesteps", FluxSetTimestepsStep), ("prepare_latents", FluxPrepareLatentsStep), + ("set_timesteps", FluxSetTimestepsStep), ("denoise", FluxDenoiseStep), ("decode", FluxDecodeStep), ] From 9e7ae568d60f59517990b652a9c825a7f4caeaf5 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 10 Sep 2025 12:55:32 +0530 Subject: [PATCH 59/74] [feat] cache allocator warmup for `from_single_model` (#12305) * add * add a test --- src/diffusers/loaders/single_file_model.py | 35 ++++++++++++------- ...test_model_flux_transformer_single_file.py | 8 +++++ 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/src/diffusers/loaders/single_file_model.py b/src/diffusers/loaders/single_file_model.py index 16bd044107..b53647d476 100644 --- a/src/diffusers/loaders/single_file_model.py +++ b/src/diffusers/loaders/single_file_model.py @@ -22,6 +22,7 @@ from huggingface_hub.utils import validate_hf_hub_args from typing_extensions import Self from .. import __version__ +from ..models.model_loading_utils import _caching_allocator_warmup, _determine_device_map, _expand_device_map from ..quantizers import DiffusersAutoQuantizer from ..utils import deprecate, is_accelerate_available, is_torch_version, logging from ..utils.torch_utils import empty_device_cache @@ -297,6 +298,7 @@ class FromOriginalModelMixin: low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) device = kwargs.pop("device", None) disable_mmap = kwargs.pop("disable_mmap", False) + device_map = kwargs.pop("device_map", None) user_agent = {"diffusers": __version__, "file_type": "single_file", "framework": "pytorch"} # In order to ensure popular quantization methods are supported. Can be disable with `disable_telemetry` @@ -403,19 +405,8 @@ class FromOriginalModelMixin: with ctx(): model = cls.from_config(diffusers_model_config) - checkpoint_mapping_kwargs = _get_mapping_function_kwargs(checkpoint_mapping_fn, **kwargs) + model_state_dict = model.state_dict() - if _should_convert_state_dict_to_diffusers(model.state_dict(), checkpoint): - diffusers_format_checkpoint = checkpoint_mapping_fn( - config=diffusers_model_config, checkpoint=checkpoint, **checkpoint_mapping_kwargs - ) - else: - diffusers_format_checkpoint = checkpoint - - if not diffusers_format_checkpoint: - raise SingleFileComponentError( - f"Failed to load {mapping_class_name}. Weights for this component appear to be missing in the checkpoint." 
- ) # Check if `_keep_in_fp32_modules` is not None use_keep_in_fp32_modules = (cls._keep_in_fp32_modules is not None) and ( (torch_dtype == torch.float16) or hasattr(hf_quantizer, "use_keep_in_fp32_modules") @@ -428,6 +419,26 @@ class FromOriginalModelMixin: else: keep_in_fp32_modules = [] + # Now that the model is loaded, we can determine the `device_map` + device_map = _determine_device_map(model, device_map, None, torch_dtype, keep_in_fp32_modules, hf_quantizer) + if device_map is not None: + expanded_device_map = _expand_device_map(device_map, model_state_dict.keys()) + _caching_allocator_warmup(model, expanded_device_map, torch_dtype, hf_quantizer) + + checkpoint_mapping_kwargs = _get_mapping_function_kwargs(checkpoint_mapping_fn, **kwargs) + + if _should_convert_state_dict_to_diffusers(model_state_dict, checkpoint): + diffusers_format_checkpoint = checkpoint_mapping_fn( + config=diffusers_model_config, checkpoint=checkpoint, **checkpoint_mapping_kwargs + ) + else: + diffusers_format_checkpoint = checkpoint + + if not diffusers_format_checkpoint: + raise SingleFileComponentError( + f"Failed to load {mapping_class_name}. Weights for this component appear to be missing in the checkpoint." + ) + if hf_quantizer is not None: hf_quantizer.preprocess_model( model=model, diff --git a/tests/single_file/test_model_flux_transformer_single_file.py b/tests/single_file/test_model_flux_transformer_single_file.py index a7e07e517f..8290c339b9 100644 --- a/tests/single_file/test_model_flux_transformer_single_file.py +++ b/tests/single_file/test_model_flux_transformer_single_file.py @@ -69,3 +69,11 @@ class FluxTransformer2DModelSingleFileTests(unittest.TestCase): del model gc.collect() backend_empty_cache(torch_device) + + def test_device_map_cuda(self): + backend_empty_cache(torch_device) + model = self.model_class.from_single_file(self.ckpt_path, device_map="cuda") + + del model + gc.collect() + backend_empty_cache(torch_device) From e1b7f1f240526bf958c4bec79443094e4057fbf5 Mon Sep 17 00:00:00 2001 From: ttio2tech <125389792+ttio2tech@users.noreply.github.com> Date: Wed, 10 Sep 2025 14:59:08 -0400 Subject: [PATCH 60/74] fix for the qwen controlnet pipeline - wrong device can be used (#12309) fix the device for textencoder --- .../pipelines/qwenimage/pipeline_qwenimage_controlnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py index 322b1d9d3a..90470022af 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py @@ -265,7 +265,7 @@ class QwenImageControlNetPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): txt = [template.format(e) for e in prompt] txt_tokens = self.tokenizer( txt, max_length=self.tokenizer_max_length + drop_idx, padding=True, truncation=True, return_tensors="pt" - ).to(self.device) + ).to(device) encoder_hidden_states = self.text_encoder( input_ids=txt_tokens.input_ids, attention_mask=txt_tokens.attention_mask, From eb7ef26736055055df252d8f06d665fd407f6fe7 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Thu, 11 Sep 2025 01:17:08 +0530 Subject: [PATCH 61/74] [quant] allow `components_to_quantize` to be a non-list for single components (#12234) * allow non list components_to_quantize. 
* up * Apply suggestions from code review * Apply suggestions from code review Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * [docs] components_to_quantize (#12287) init Co-authored-by: Sayak Paul --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- docs/source/en/api/pipelines/cogvideox.md | 2 +- docs/source/en/api/pipelines/hunyuan_video.md | 6 +++--- docs/source/en/quantization/overview.md | 5 ++++- docs/source/en/using-diffusers/text-img2vid.md | 2 +- src/diffusers/quantizers/pipe_quant_config.py | 5 ++++- .../test_pipeline_level_quantization.py | 16 ++++++++++++++++ 6 files changed, 29 insertions(+), 7 deletions(-) diff --git a/docs/source/en/api/pipelines/cogvideox.md b/docs/source/en/api/pipelines/cogvideox.md index 157e987efd..ec673e0763 100644 --- a/docs/source/en/api/pipelines/cogvideox.md +++ b/docs/source/en/api/pipelines/cogvideox.md @@ -50,7 +50,7 @@ from diffusers.utils import export_to_video pipeline_quant_config = PipelineQuantizationConfig( quant_backend="torchao", quant_kwargs={"quant_type": "int8wo"}, - components_to_quantize=["transformer"] + components_to_quantize="transformer" ) # fp8 layerwise weight-casting diff --git a/docs/source/en/api/pipelines/hunyuan_video.md b/docs/source/en/api/pipelines/hunyuan_video.md index df52c49b36..cdd81495b6 100644 --- a/docs/source/en/api/pipelines/hunyuan_video.md +++ b/docs/source/en/api/pipelines/hunyuan_video.md @@ -54,7 +54,7 @@ pipeline_quant_config = PipelineQuantizationConfig( "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16 }, - components_to_quantize=["transformer"] + components_to_quantize="transformer" ) pipeline = HunyuanVideoPipeline.from_pretrained( @@ -91,7 +91,7 @@ pipeline_quant_config = PipelineQuantizationConfig( "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16 }, - components_to_quantize=["transformer"] + components_to_quantize="transformer" ) pipeline = HunyuanVideoPipeline.from_pretrained( @@ -139,7 +139,7 @@ export_to_video(video, "output.mp4", fps=15) "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16 }, - components_to_quantize=["transformer"] + components_to_quantize="transformer" ) pipeline = HunyuanVideoPipeline.from_pretrained( diff --git a/docs/source/en/quantization/overview.md b/docs/source/en/quantization/overview.md index 12c39f52e4..38abeeac6d 100644 --- a/docs/source/en/quantization/overview.md +++ b/docs/source/en/quantization/overview.md @@ -34,7 +34,9 @@ Initialize [`~quantizers.PipelineQuantizationConfig`] with the following paramet > [!TIP] > These `quant_kwargs` arguments are different for each backend. Refer to the [Quantization API](../api/quantization) docs to view the arguments for each backend. -- `components_to_quantize` specifies which components of the pipeline to quantize. Typically, you should quantize the most compute intensive components like the transformer. The text encoder is another component to consider quantizing if a pipeline has more than one such as [`FluxPipeline`]. The example below quantizes the T5 text encoder in [`FluxPipeline`] while keeping the CLIP model intact. +- `components_to_quantize` specifies which component(s) of the pipeline to quantize. Typically, you should quantize the most compute intensive components like the transformer. The text encoder is another component to consider quantizing if a pipeline has more than one such as [`FluxPipeline`]. 
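For a single component, `components_to_quantize` can now also be a plain string instead of a one-element list; a minimal sketch of the two equivalent spellings (the backend and kwargs simply mirror the 8-bit bitsandbytes settings used in this patch's test):

```py
from diffusers.quantizers import PipelineQuantizationConfig

# Both configs quantize only the transformer; the string form is normalized to a
# one-element list internally, so downstream behavior is identical.
config_from_list = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_8bit",
    quant_kwargs={"load_in_8bit": True},
    components_to_quantize=["transformer"],
)
config_from_str = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_8bit",
    quant_kwargs={"load_in_8bit": True},
    components_to_quantize="transformer",
)
```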
The example below quantizes the T5 text encoder in [`FluxPipeline`] while keeping the CLIP model intact. + + `components_to_quantize` accepts either a list for multiple models or a string for a single model. The example below loads the bitsandbytes backend with the following arguments from [`~quantizers.quantization_config.BitsAndBytesConfig`], `load_in_4bit`, `bnb_4bit_quant_type`, and `bnb_4bit_compute_dtype`. @@ -62,6 +64,7 @@ pipe = DiffusionPipeline.from_pretrained( image = pipe("photo of a cute dog").images[0] ``` + ### Advanced quantization The `quant_mapping` argument provides more options for how to quantize each individual component in a pipeline, like combining different quantization backends. diff --git a/docs/source/en/using-diffusers/text-img2vid.md b/docs/source/en/using-diffusers/text-img2vid.md index ade3e0de32..9b69a2fded 100644 --- a/docs/source/en/using-diffusers/text-img2vid.md +++ b/docs/source/en/using-diffusers/text-img2vid.md @@ -98,7 +98,7 @@ pipeline_quant_config = PipelineQuantizationConfig( "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16 }, - components_to_quantize=["transformer"] + components_to_quantize="transformer" ) pipeline = HunyuanVideoPipeline.from_pretrained( diff --git a/src/diffusers/quantizers/pipe_quant_config.py b/src/diffusers/quantizers/pipe_quant_config.py index 5d02de16fd..f75a337341 100644 --- a/src/diffusers/quantizers/pipe_quant_config.py +++ b/src/diffusers/quantizers/pipe_quant_config.py @@ -48,12 +48,15 @@ class PipelineQuantizationConfig: self, quant_backend: str = None, quant_kwargs: Dict[str, Union[str, float, int, dict]] = None, - components_to_quantize: Optional[List[str]] = None, + components_to_quantize: Optional[Union[List[str], str]] = None, quant_mapping: Dict[str, Union[DiffQuantConfigMixin, "TransformersQuantConfigMixin"]] = None, ): self.quant_backend = quant_backend # Initialize kwargs to be {} to set to the defaults. 
self.quant_kwargs = quant_kwargs or {} + if components_to_quantize: + if isinstance(components_to_quantize, str): + components_to_quantize = [components_to_quantize] self.components_to_quantize = components_to_quantize self.quant_mapping = quant_mapping self.config_mapping = {} # book-keeping Example: `{module_name: quant_config}` diff --git a/tests/quantization/test_pipeline_level_quantization.py b/tests/quantization/test_pipeline_level_quantization.py index 51cf4057d6..5f1a3de2e5 100644 --- a/tests/quantization/test_pipeline_level_quantization.py +++ b/tests/quantization/test_pipeline_level_quantization.py @@ -299,3 +299,19 @@ transformer BitsAndBytesConfig { data = json.loads(json_part) return data + + def test_single_component_to_quantize(self): + component_to_quantize = "transformer" + quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_8bit", + quant_kwargs={"load_in_8bit": True}, + components_to_quantize=component_to_quantize, + ) + pipe = DiffusionPipeline.from_pretrained( + self.model_name, + quantization_config=quant_config, + torch_dtype=torch.bfloat16, + ) + for name, component in pipe.components.items(): + if name == component_to_quantize: + self.assertTrue(hasattr(component.config, "quantization_config")) From 55f0b3d758602226af01a48059145edd55289cfa Mon Sep 17 00:00:00 2001 From: Justin Ruan Date: Thu, 11 Sep 2025 06:47:34 +0800 Subject: [PATCH 62/74] Fix AttributeError of `VisualClozeProcessor` (#12121) Co-authored-by: YiYi Xu --- src/diffusers/pipelines/visualcloze/visualcloze_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/visualcloze/visualcloze_utils.py b/src/diffusers/pipelines/visualcloze/visualcloze_utils.py index 5d221bc1e8..efe5dff476 100644 --- a/src/diffusers/pipelines/visualcloze/visualcloze_utils.py +++ b/src/diffusers/pipelines/visualcloze/visualcloze_utils.py @@ -110,7 +110,7 @@ class VisualClozeProcessor(VaeImageProcessor): new_h = int(processed_images[i][j].height * (new_w / processed_images[i][j].width)) new_w = int(new_w / 16) * 16 new_h = int(new_h / 16) * 16 - processed_images[i][j] = self.height(processed_images[i][j], new_h, new_w) + processed_images[i][j] = self._resize_and_crop(processed_images[i][j], new_h, new_w) # Convert to tensors and normalize image_sizes = [] From 5e181eddfe7e44c1444a2511b0d8e21d177850a0 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Thu, 11 Sep 2025 10:04:35 +0530 Subject: [PATCH 63/74] Deprecate slicing and tiling methods from `DiffusionPipeline` (#12271) * deprecate slicing from flux pipeline. * propagate. 
* tiling * up * up --- .../pipeline_faithdiff_stable_diffusion_xl.py | 12 ++++++++ .../pipeline_flux_kontext_multiple_images.py | 13 ++++++++ .../community/pipeline_flux_rf_inversion.py | 25 ++++++++++++++++ .../pipeline_flux_semantic_guidance.py | 13 ++++++++ examples/community/pipeline_flux_with_cfg.py | 25 ++++++++++++++++ ...stable_diffusion_3_differential_img2img.py | 6 +--- .../pipeline_stable_diffusion_boxdiff.py | 24 +++++++++++++++ .../pipeline_stable_diffusion_pag.py | 24 +++++++++++++++ .../community/pipeline_stg_hunyuan_video.py | 26 +++++++++++++++- examples/community/pipeline_stg_mochi.py | 30 +++++++++++++++---- .../pipeline_prompt_diffusion.py | 12 ++++++++ .../pipelines/allegro/pipeline_allegro.py | 24 +++++++++++++++ .../pipelines/audioldm2/pipeline_audioldm2.py | 13 ++++++++ .../blip_diffusion/pipeline_blip_diffusion.py | 6 +--- .../pipelines/chroma/pipeline_chroma.py | 25 ++++++++++++++++ .../chroma/pipeline_chroma_img2img.py | 25 ++++++++++++++++ .../pipeline_cogvideox_image2video.py | 6 +--- .../pipeline_consistency_models.py | 6 +--- .../pipeline_controlnet_blip_diffusion.py | 6 +--- .../pipeline_hunyuandit_controlnet.py | 6 +--- src/diffusers/pipelines/flux/pipeline_flux.py | 25 ++++++++++++++++ .../pipelines/flux/pipeline_flux_control.py | 25 ++++++++++++++++ .../flux/pipeline_flux_control_inpaint.py | 25 ++++++++++++++++ .../pipelines/flux/pipeline_flux_fill.py | 25 ++++++++++++++++ .../pipelines/flux/pipeline_flux_img2img.py | 25 ++++++++++++++++ .../pipelines/flux/pipeline_flux_kontext.py | 25 ++++++++++++++++ .../flux/pipeline_flux_kontext_inpaint.py | 25 ++++++++++++++++ .../hidream_image/pipeline_hidream_image.py | 24 +++++++++++++++ .../pipeline_hunyuan_skyreels_image2video.py | 26 +++++++++++++++- .../hunyuan_video/pipeline_hunyuan_video.py | 26 +++++++++++++++- .../pipeline_hunyuan_video_framepack.py | 26 +++++++++++++++- .../pipeline_hunyuan_video_image2video.py | 26 +++++++++++++++- .../hunyuandit/pipeline_hunyuandit.py | 6 +--- .../pipelines/kandinsky/pipeline_kandinsky.py | 6 +--- .../kandinsky/pipeline_kandinsky_img2img.py | 6 +--- .../kandinsky/pipeline_kandinsky_inpaint.py | 6 +--- .../pipeline_kandinsky2_2_prior.py | 6 +--- .../pipeline_kandinsky2_2_prior_emb2emb.py | 6 +--- .../pipeline_leditspp_stable_diffusion.py | 24 +++++++++++++++ .../pipeline_leditspp_stable_diffusion_xl.py | 25 ++++++++++++++++ .../ltx/pipeline_ltx_latent_upsample.py | 26 +++++++++++++++- .../pipelines/lumina2/pipeline_lumina2.py | 24 +++++++++++++++ .../pipelines/mochi/pipeline_mochi.py | 30 +++++++++++++++---- .../pipelines/omnigen/pipeline_omnigen.py | 26 +++++++++++++++- .../pipelines/pag/pipeline_pag_hunyuandit.py | 6 +--- .../pipelines/pag/pipeline_pag_sana.py | 25 ++++++++++++++++ src/diffusers/pipelines/pipeline_utils.py | 25 ++++++++++++++++ .../pipelines/qwenimage/pipeline_qwenimage.py | 26 +++++++++++++++- .../pipeline_qwenimage_controlnet.py | 26 +++++++++++++++- .../qwenimage/pipeline_qwenimage_edit.py | 26 +++++++++++++++- .../pipeline_qwenimage_edit_inpaint.py | 26 +++++++++++++++- .../qwenimage/pipeline_qwenimage_img2img.py | 26 +++++++++++++++- .../qwenimage/pipeline_qwenimage_inpaint.py | 26 +++++++++++++++- src/diffusers/pipelines/sana/pipeline_sana.py | 25 ++++++++++++++++ .../sana/pipeline_sana_controlnet.py | 25 ++++++++++++++++ .../pipelines/sana/pipeline_sana_sprint.py | 25 ++++++++++++++++ .../sana/pipeline_sana_sprint_img2img.py | 25 ++++++++++++++++ .../stable_audio/pipeline_stable_audio.py | 18 +++++++---- .../unidiffuser/pipeline_unidiffuser.py 
| 24 +++++++++++++++ .../pipeline_visualcloze_combined.py | 6 +--- .../pipeline_visualcloze_generation.py | 25 ++++++++++++++++ 61 files changed, 1108 insertions(+), 98 deletions(-) diff --git a/examples/community/pipeline_faithdiff_stable_diffusion_xl.py b/examples/community/pipeline_faithdiff_stable_diffusion_xl.py index aa95d2ec71..a8fdc133d0 100644 --- a/examples/community/pipeline_faithdiff_stable_diffusion_xl.py +++ b/examples/community/pipeline_faithdiff_stable_diffusion_xl.py @@ -1705,6 +1705,12 @@ class FaithDiffStableDiffusionXLPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() self.unet.denoise_encoder.enable_tiling() @@ -1713,6 +1719,12 @@ class FaithDiffStableDiffusionXLPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() self.unet.denoise_encoder.disable_tiling() diff --git a/examples/community/pipeline_flux_kontext_multiple_images.py b/examples/community/pipeline_flux_kontext_multiple_images.py index 7e4a9ed0fa..9e6ae427db 100644 --- a/examples/community/pipeline_flux_kontext_multiple_images.py +++ b/examples/community/pipeline_flux_kontext_multiple_images.py @@ -35,6 +35,7 @@ from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -643,6 +644,12 @@ class FluxKontextPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_tiling @@ -651,6 +658,12 @@ class FluxKontextPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def preprocess_image(self, image: PipelineImageInput, _auto_resize: bool, multiple_of: int) -> torch.Tensor: diff --git a/examples/community/pipeline_flux_rf_inversion.py b/examples/community/pipeline_flux_rf_inversion.py index 8f8b4817ac..2cd6eb088c 100644 --- a/examples/community/pipeline_flux_rf_inversion.py +++ b/examples/community/pipeline_flux_rf_inversion.py @@ -30,6 +30,7 @@ from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -526,6 +527,12 @@ class RFInversionFluxPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -533,6 +540,12 @@ class RFInversionFluxPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -541,6 +554,12 @@ class RFInversionFluxPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -548,6 +567,12 @@ class RFInversionFluxPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents_inversion( diff --git a/examples/community/pipeline_flux_semantic_guidance.py b/examples/community/pipeline_flux_semantic_guidance.py index b3d2b3a4b4..74cd5c6981 100644 --- a/examples/community/pipeline_flux_semantic_guidance.py +++ b/examples/community/pipeline_flux_semantic_guidance.py @@ -35,6 +35,7 @@ from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -702,6 +703,12 @@ class FluxSemanticGuidancePipeline( compute decoding and encoding in several steps. 
This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_tiling @@ -710,6 +717,12 @@ class FluxSemanticGuidancePipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_latents diff --git a/examples/community/pipeline_flux_with_cfg.py b/examples/community/pipeline_flux_with_cfg.py index 3916aff257..5bc13f7e5e 100644 --- a/examples/community/pipeline_flux_with_cfg.py +++ b/examples/community/pipeline_flux_with_cfg.py @@ -28,6 +28,7 @@ from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -503,6 +504,12 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -510,6 +517,12 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -518,6 +531,12 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -525,6 +544,12 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. 
""" + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/examples/community/pipeline_stable_diffusion_3_differential_img2img.py b/examples/community/pipeline_stable_diffusion_3_differential_img2img.py index 643386232b..1803cf60cc 100644 --- a/examples/community/pipeline_stable_diffusion_3_differential_img2img.py +++ b/examples/community/pipeline_stable_diffusion_3_differential_img2img.py @@ -29,11 +29,7 @@ from diffusers.models.transformers import SD3Transformer2DModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput from diffusers.schedulers import FlowMatchEulerDiscreteScheduler -from diffusers.utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring from diffusers.utils.torch_utils import randn_tensor diff --git a/examples/community/pipeline_stable_diffusion_boxdiff.py b/examples/community/pipeline_stable_diffusion_boxdiff.py index ebca3017c3..1133321fcc 100644 --- a/examples/community/pipeline_stable_diffusion_boxdiff.py +++ b/examples/community/pipeline_stable_diffusion_boxdiff.py @@ -504,6 +504,12 @@ class StableDiffusionBoxDiffPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -511,6 +517,12 @@ class StableDiffusionBoxDiffPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -519,6 +531,12 @@ class StableDiffusionBoxDiffPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -526,6 +544,12 @@ class StableDiffusionBoxDiffPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def _encode_prompt( diff --git a/examples/community/pipeline_stable_diffusion_pag.py b/examples/community/pipeline_stable_diffusion_pag.py index 69a0059d98..6728e2a60b 100644 --- a/examples/community/pipeline_stable_diffusion_pag.py +++ b/examples/community/pipeline_stable_diffusion_pag.py @@ -471,6 +471,12 @@ class StableDiffusionPAGPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -478,6 +484,12 @@ class StableDiffusionPAGPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -486,6 +498,12 @@ class StableDiffusionPAGPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -493,6 +511,12 @@ class StableDiffusionPAGPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def _encode_prompt( diff --git a/examples/community/pipeline_stg_hunyuan_video.py b/examples/community/pipeline_stg_hunyuan_video.py index a2cb9aa1b7..028d54d047 100644 --- a/examples/community/pipeline_stg_hunyuan_video.py +++ b/examples/community/pipeline_stg_hunyuan_video.py @@ -26,7 +26,7 @@ from diffusers.models import AutoencoderKLHunyuanVideo, HunyuanVideoTransformer3 from diffusers.pipelines.hunyuan_video.pipeline_output import HunyuanVideoPipelineOutput from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler -from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring +from diffusers.utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from diffusers.utils.torch_utils import randn_tensor from diffusers.video_processor import VideoProcessor @@ -481,6 +481,12 @@ class HunyuanVideoSTGPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin): Enable sliced VAE decoding. 
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -488,6 +494,12 @@ class HunyuanVideoSTGPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -496,6 +508,12 @@ class HunyuanVideoSTGPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -503,6 +521,12 @@ class HunyuanVideoSTGPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() @property diff --git a/examples/community/pipeline_stg_mochi.py b/examples/community/pipeline_stg_mochi.py index dbe5d2525a..ad9317f6bc 100644 --- a/examples/community/pipeline_stg_mochi.py +++ b/examples/community/pipeline_stg_mochi.py @@ -26,11 +26,7 @@ from diffusers.models import AutoencoderKLMochi, MochiTransformer3DModel from diffusers.pipelines.mochi.pipeline_output import MochiPipelineOutput from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler -from diffusers.utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from diffusers.utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from diffusers.utils.torch_utils import randn_tensor from diffusers.video_processor import VideoProcessor @@ -458,6 +454,12 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." 
+ deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -465,6 +467,12 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -473,6 +481,12 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -480,6 +494,12 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/examples/research_projects/promptdiffusion/pipeline_prompt_diffusion.py b/examples/research_projects/promptdiffusion/pipeline_prompt_diffusion.py index 7dfbc8b3e5..1bd9c0161f 100644 --- a/examples/research_projects/promptdiffusion/pipeline_prompt_diffusion.py +++ b/examples/research_projects/promptdiffusion/pipeline_prompt_diffusion.py @@ -263,6 +263,12 @@ class PromptDiffusionPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling @@ -271,6 +277,12 @@ class PromptDiffusionPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt diff --git a/src/diffusers/pipelines/allegro/pipeline_allegro.py b/src/diffusers/pipelines/allegro/pipeline_allegro.py index 2c9548706e..3be0129088 100644 --- a/src/diffusers/pipelines/allegro/pipeline_allegro.py +++ b/src/diffusers/pipelines/allegro/pipeline_allegro.py @@ -651,6 +651,12 @@ class AllegroPipeline(DiffusionPipeline): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -658,6 +664,12 @@ class AllegroPipeline(DiffusionPipeline): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -666,6 +678,12 @@ class AllegroPipeline(DiffusionPipeline): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -673,6 +691,12 @@ class AllegroPipeline(DiffusionPipeline): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() @property diff --git a/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py b/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py index 0af2e1fe36..452fc3c01b 100644 --- a/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py +++ b/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py @@ -34,6 +34,7 @@ from transformers import ( from ...models import AutoencoderKL from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( + deprecate, is_accelerate_available, is_accelerate_version, is_librosa_available, @@ -228,6 +229,12 @@ class AudioLDM2Pipeline(DiffusionPipeline): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. 
""" + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing @@ -236,6 +243,12 @@ class AudioLDM2Pipeline(DiffusionPipeline): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): diff --git a/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py b/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py index 8cd463c970..705d930b59 100644 --- a/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +++ b/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py @@ -19,11 +19,7 @@ from transformers import CLIPTokenizer from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import PNDMScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput from .blip_image_processing import BlipImageProcessor diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index a3dd1422b8..f3ed700bc4 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -25,6 +25,7 @@ from ...models import AutoencoderKL, ChromaTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -508,6 +509,12 @@ class ChromaPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -515,6 +522,12 @@ class ChromaPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -523,6 +536,12 @@ class ChromaPipeline( compute decoding and encoding in several steps. 
This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -530,6 +549,12 @@ class ChromaPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_latents diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py b/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py index 233f4c43a1..26f13fe06c 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py @@ -25,6 +25,7 @@ from ...models import AutoencoderKL, ChromaTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -542,6 +543,12 @@ class ChromaImg2ImgPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -549,6 +556,12 @@ class ChromaImg2ImgPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -557,6 +570,12 @@ class ChromaImg2ImgPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -564,6 +583,12 @@ class ChromaImg2ImgPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py index 225240927f..c523c9adec 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py @@ -28,11 +28,7 @@ from ...models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel from ...models.embeddings import get_3d_rotary_pos_embed from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from .pipeline_output import CogVideoXPipelineOutput diff --git a/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py b/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py index dec448f3f4..1fbdeb1f27 100644 --- a/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py +++ b/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py @@ -18,11 +18,7 @@ import torch from ...models import UNet2DModel from ...schedulers import CMStochasticIterativeScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py index c2ae408778..e0f1879405 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py @@ -20,11 +20,7 @@ from transformers import CLIPTokenizer from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel from ...schedulers import PNDMScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..blip_diffusion.blip_image_processing import BlipImageProcessor from ..blip_diffusion.modeling_blip2 import Blip2QFormerModel diff --git a/src/diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py b/src/diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py index 9b9adf4901..2b5684de95 100644 --- a/src/diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +++ b/src/diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py @@ -27,11 +27,7 @@ from ...models import AutoencoderKL, HunyuanDiT2DControlNetModel, HunyuanDiT2DMo from ...models.embeddings import get_2d_rotary_pos_embed from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDPMScheduler -from ...utils import ( - is_torch_xla_available, - logging, - 
replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline diff --git a/src/diffusers/pipelines/flux/pipeline_flux.py b/src/diffusers/pipelines/flux/pipeline_flux.py index 124e611bd0..5041e352f7 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux.py +++ b/src/diffusers/pipelines/flux/pipeline_flux.py @@ -32,6 +32,7 @@ from ...models import AutoencoderKL, FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -545,6 +546,12 @@ class FluxPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -552,6 +559,12 @@ class FluxPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -560,6 +573,12 @@ class FluxPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -567,6 +586,12 @@ class FluxPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control.py b/src/diffusers/pipelines/flux/pipeline_flux_control.py index cc9ebb4754..848d7bd392 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control.py @@ -26,6 +26,7 @@ from ...models.transformers import FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -496,6 +497,12 @@ class FluxControlPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. 
""" + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -503,6 +510,12 @@ class FluxControlPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -511,6 +524,12 @@ class FluxControlPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -518,6 +537,12 @@ class FluxControlPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_latents diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py index 5acc5080f5..6915a83a7c 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py @@ -35,6 +35,7 @@ from ...models.transformers import FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -577,6 +578,12 @@ class FluxControlInpaintPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -584,6 +591,12 @@ class FluxControlInpaintPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." 
+ deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -592,6 +605,12 @@ class FluxControlInpaintPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -599,6 +618,12 @@ class FluxControlInpaintPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/src/diffusers/pipelines/flux/pipeline_flux_fill.py b/src/diffusers/pipelines/flux/pipeline_flux_fill.py index 956f6fb106..5cb9c82204 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_fill.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_fill.py @@ -26,6 +26,7 @@ from ...models.transformers import FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -633,6 +634,12 @@ class FluxFillPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -640,6 +647,12 @@ class FluxFillPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -648,6 +661,12 @@ class FluxFillPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -655,6 +674,12 @@ class FluxFillPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. 
""" + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.flux.pipeline_flux_img2img.FluxImg2ImgPipeline.prepare_latents diff --git a/src/diffusers/pipelines/flux/pipeline_flux_img2img.py b/src/diffusers/pipelines/flux/pipeline_flux_img2img.py index 4a9f2bad6a..ab9140dae9 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_img2img.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_img2img.py @@ -33,6 +33,7 @@ from ...models.transformers import FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -613,6 +614,12 @@ class FluxImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_slicing @@ -621,6 +628,12 @@ class FluxImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.enable_vae_tiling @@ -630,6 +643,12 @@ class FluxImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_tiling @@ -638,6 +657,12 @@ class FluxImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/src/diffusers/pipelines/flux/pipeline_flux_kontext.py b/src/diffusers/pipelines/flux/pipeline_flux_kontext.py index 87011299c4..94ae460afc 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_kontext.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_kontext.py @@ -32,6 +32,7 @@ from ...models import AutoencoderKL, FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -614,6 +615,12 @@ class FluxKontextPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_slicing @@ -622,6 +629,12 @@ class FluxKontextPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.enable_vae_tiling @@ -631,6 +644,12 @@ class FluxKontextPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_tiling @@ -639,6 +658,12 @@ class FluxKontextPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py index 3cdb8caea2..b6f957981e 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py @@ -22,6 +22,7 @@ from ...models import AutoencoderKL, FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -688,6 +689,12 @@ class FluxKontextInpaintPipeline( Enable sliced VAE decoding. 
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_slicing @@ -696,6 +703,12 @@ class FluxKontextInpaintPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.enable_vae_tiling @@ -705,6 +718,12 @@ class FluxKontextInpaintPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_tiling @@ -713,6 +732,12 @@ class FluxKontextInpaintPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py b/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py index bf36ca2fa3..b6af23bca8 100644 --- a/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py +++ b/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py @@ -522,6 +522,12 @@ class HiDreamImagePipeline(DiffusionPipeline, HiDreamImageLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -529,6 +535,12 @@ class HiDreamImagePipeline(DiffusionPipeline, HiDreamImageLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. 
Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -537,6 +549,12 @@ class HiDreamImagePipeline(DiffusionPipeline, HiDreamImageLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -544,6 +562,12 @@ class HiDreamImagePipeline(DiffusionPipeline, HiDreamImageLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def check_inputs( diff --git a/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_skyreels_image2video.py b/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_skyreels_image2video.py index d8c3548946..b50a6ae3ed 100644 --- a/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_skyreels_image2video.py +++ b/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_skyreels_image2video.py @@ -24,7 +24,7 @@ from ...image_processor import PipelineImageInput from ...loaders import HunyuanVideoLoraLoaderMixin from ...models import AutoencoderKLHunyuanVideo, HunyuanVideoTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline @@ -463,6 +463,12 @@ class HunyuanSkyreelsImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoa Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -470,6 +476,12 @@ class HunyuanSkyreelsImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoa Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -478,6 +490,12 @@ class HunyuanSkyreelsImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoa compute decoding and encoding in several steps. 
This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -485,6 +503,12 @@ class HunyuanSkyreelsImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoa Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() @property diff --git a/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py b/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py index 76b288ed0b..5c8e295eaf 100644 --- a/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +++ b/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py @@ -23,7 +23,7 @@ from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...loaders import HunyuanVideoLoraLoaderMixin from ...models import AutoencoderKLHunyuanVideo, HunyuanVideoTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline @@ -420,6 +420,12 @@ class HunyuanVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -427,6 +433,12 @@ class HunyuanVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -435,6 +447,12 @@ class HunyuanVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." 
+ deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -442,6 +460,12 @@ class HunyuanVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() @property diff --git a/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_framepack.py b/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_framepack.py index 40d6534655..8006514f47 100644 --- a/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_framepack.py +++ b/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_framepack.py @@ -33,7 +33,7 @@ from ...image_processor import PipelineImageInput from ...loaders import HunyuanVideoLoraLoaderMixin from ...models import AutoencoderKLHunyuanVideo, HunyuanVideoFramepackTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline @@ -570,6 +570,12 @@ class HunyuanVideoFramepackPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMix Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -577,6 +583,12 @@ class HunyuanVideoFramepackPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMix Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -585,6 +597,12 @@ class HunyuanVideoFramepackPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMix compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -592,6 +610,12 @@ class HunyuanVideoFramepackPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMix Disable tiled VAE decoding. 
If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() @property diff --git a/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_image2video.py b/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_image2video.py index b9246e2eb2..aa04e65097 100644 --- a/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_image2video.py +++ b/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_image2video.py @@ -30,7 +30,7 @@ from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...loaders import HunyuanVideoLoraLoaderMixin from ...models import AutoencoderKLHunyuanVideo, HunyuanVideoTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline @@ -598,6 +598,12 @@ class HunyuanVideoImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoader Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -605,6 +611,12 @@ class HunyuanVideoImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoader Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -613,6 +625,12 @@ class HunyuanVideoImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoader compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -620,6 +638,12 @@ class HunyuanVideoImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoader Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() @property diff --git a/src/diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py b/src/diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py index c7f84866fe..e2f935aaf4 100644 --- a/src/diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +++ b/src/diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py @@ -27,11 +27,7 @@ from ...models import AutoencoderKL, HunyuanDiT2DModel from ...models.embeddings import get_2d_rotary_pos_embed from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDPMScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py index 92f612f541..33529f5d09 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py @@ -21,11 +21,7 @@ from transformers import ( from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler, DDPMScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_encoder import MultilingualCLIP diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py index 998fc777c0..f5e41d499d 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py @@ -23,11 +23,7 @@ from transformers import ( from ...image_processor import VaeImageProcessor from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_encoder import MultilingualCLIP diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py index cde0b8fd0a..731fce4998 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -28,11 +28,7 @@ from transformers import ( from ... 
import __version__ from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_encoder import MultilingualCLIP diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py index 0e7e16f9dd..bc67847831 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py @@ -6,11 +6,7 @@ from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTo from ...models import PriorTransformer from ...schedulers import UnCLIPScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..kandinsky import KandinskyPriorPipelineOutput from ..pipeline_utils import DiffusionPipeline diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py index 1a7198b968..b586d16611 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py @@ -6,11 +6,7 @@ from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTo from ...models import PriorTransformer from ...schedulers import UnCLIPScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..kandinsky import KandinskyPriorPipelineOutput from ..pipeline_utils import DiffusionPipeline diff --git a/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py b/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py index 341ccabaa1..5b61aaf9b6 100644 --- a/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +++ b/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py @@ -722,6 +722,12 @@ class LEditsPPPipelineStableDiffusion( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -729,6 +735,12 @@ class LEditsPPPipelineStableDiffusion( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." 
+ deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -737,6 +749,12 @@ class LEditsPPPipelineStableDiffusion( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -744,6 +762,12 @@ class LEditsPPPipelineStableDiffusion( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() @torch.no_grad() diff --git a/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py b/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py index ac64844f6f..c1f9a98f06 100644 --- a/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +++ b/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py @@ -44,6 +44,7 @@ from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, @@ -770,6 +771,12 @@ class LEditsPPPipelineStableDiffusionXL( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -777,6 +784,12 @@ class LEditsPPPipelineStableDiffusionXL( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -785,6 +798,12 @@ class LEditsPPPipelineStableDiffusionXL( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -792,6 +811,12 @@ class LEditsPPPipelineStableDiffusionXL( Disable tiled VAE decoding. 
If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEditsPPPipelineStableDiffusion.prepare_unet diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py b/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py index 284f33b326..1e94f6895f 100644 --- a/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py +++ b/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py @@ -18,7 +18,7 @@ import torch from ...image_processor import PipelineImageInput from ...models import AutoencoderKLLTXVideo -from ...utils import get_logger +from ...utils import deprecate, get_logger from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline @@ -148,6 +148,12 @@ class LTXLatentUpsamplePipeline(DiffusionPipeline): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -155,6 +161,12 @@ class LTXLatentUpsamplePipeline(DiffusionPipeline): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -163,6 +175,12 @@ class LTXLatentUpsamplePipeline(DiffusionPipeline): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -170,6 +188,12 @@ class LTXLatentUpsamplePipeline(DiffusionPipeline): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def check_inputs(self, video, height, width, latents): diff --git a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py index c4df7ba1c3..937803edbc 100644 --- a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py +++ b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py @@ -433,6 +433,12 @@ class Lumina2Pipeline(DiffusionPipeline, Lumina2LoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -440,6 +446,12 @@ class Lumina2Pipeline(DiffusionPipeline, Lumina2LoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -448,6 +460,12 @@ class Lumina2Pipeline(DiffusionPipeline, Lumina2LoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -455,6 +473,12 @@ class Lumina2Pipeline(DiffusionPipeline, Lumina2LoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): diff --git a/src/diffusers/pipelines/mochi/pipeline_mochi.py b/src/diffusers/pipelines/mochi/pipeline_mochi.py index 5581529b23..5874a92c6f 100644 --- a/src/diffusers/pipelines/mochi/pipeline_mochi.py +++ b/src/diffusers/pipelines/mochi/pipeline_mochi.py @@ -23,11 +23,7 @@ from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...loaders import Mochi1LoraLoaderMixin from ...models import AutoencoderKLMochi, MochiTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline @@ -396,6 +392,12 @@ class MochiPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -403,6 +405,12 @@ class MochiPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -411,6 +419,12 @@ class MochiPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -418,6 +432,12 @@ class MochiPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/src/diffusers/pipelines/omnigen/pipeline_omnigen.py b/src/diffusers/pipelines/omnigen/pipeline_omnigen.py index f5a535b2da..090cb46aac 100644 --- a/src/diffusers/pipelines/omnigen/pipeline_omnigen.py +++ b/src/diffusers/pipelines/omnigen/pipeline_omnigen.py @@ -23,7 +23,7 @@ from ...image_processor import PipelineImageInput, VaeImageProcessor from ...models.autoencoders import AutoencoderKL from ...models.transformers import OmniGenTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, is_torchvision_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, is_torchvision_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -235,6 +235,12 @@ class OmniGenPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -242,6 +248,12 @@ class OmniGenPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -250,6 +262,12 @@ class OmniGenPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -257,6 +275,12 @@ class OmniGenPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_latents diff --git a/src/diffusers/pipelines/pag/pipeline_pag_hunyuandit.py b/src/diffusers/pipelines/pag/pipeline_pag_hunyuandit.py index 3a08408662..d156eac8f3 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_hunyuandit.py @@ -28,11 +28,7 @@ from ...models.attention_processor import PAGCFGHunyuanAttnProcessor2_0, PAGHuny from ...models.embeddings import get_2d_rotary_pos_embed from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDPMScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pag_utils import PAGMixin diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sana.py b/src/diffusers/pipelines/pag/pipeline_pag_sana.py index 5857eeeb04..9e91ccbe80 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sana.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sana.py @@ -29,6 +29,7 @@ from ...models.attention_processor import PAGCFGSanaLinearAttnProcessor2_0, PAGI from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( BACKENDS_MAPPING, + deprecate, is_bs4_available, is_ftfy_available, is_torch_xla_available, @@ -190,6 +191,12 @@ class SanaPAGPipeline(DiffusionPipeline, PAGMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -197,6 +204,12 @@ class SanaPAGPipeline(DiffusionPipeline, PAGMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -205,6 +218,12 @@ class SanaPAGPipeline(DiffusionPipeline, PAGMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -212,6 +231,12 @@ class SanaPAGPipeline(DiffusionPipeline, PAGMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. 
""" + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def encode_prompt( diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 0116ad917c..ce0dbe21c8 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -57,6 +57,7 @@ from ..utils import ( PushToHubMixin, _get_detailed_type, _is_valid_type, + deprecate, is_accelerate_available, is_accelerate_version, is_hpu_available, @@ -2201,6 +2202,12 @@ class StableDiffusionMixin: Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -2208,6 +2215,12 @@ class StableDiffusionMixin: Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -2216,6 +2229,12 @@ class StableDiffusionMixin: compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -2223,6 +2242,12 @@ class StableDiffusionMixin: Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py index 807910dfb1..33dc2039b9 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py @@ -23,7 +23,7 @@ from ...image_processor import VaeImageProcessor from ...loaders import QwenImageLoraLoaderMixin from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import QwenImagePipelineOutput @@ -348,6 +348,12 @@ class QwenImagePipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -355,6 +361,12 @@ class QwenImagePipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -363,6 +375,12 @@ class QwenImagePipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -370,6 +388,12 @@ class QwenImagePipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
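# With the base StableDiffusionMixin methods above now deprecated, the migration for
# callers is a one-line change: invoke the helper on the VAE component instead of on the
# pipeline. A hedged usage sketch (checkpoint id and dtype are illustrative, not taken
# from this patch):
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.enable_vae_slicing()  # old call: still works for now, but logs the deprecation above
pipe.vae.enable_slicing()  # preferred: call the AutoencoderKL helper directly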
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py index 90470022af..5111096d93 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py @@ -24,7 +24,7 @@ from ...loaders import QwenImageLoraLoaderMixin from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel from ...models.controlnets.controlnet_qwenimage import QwenImageControlNetModel, QwenImageMultiControlNetModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import QwenImagePipelineOutput @@ -412,6 +412,12 @@ class QwenImageControlNetPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -419,6 +425,12 @@ class QwenImageControlNetPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -427,6 +439,12 @@ class QwenImageControlNetPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -434,6 +452,12 @@ class QwenImageControlNetPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline.prepare_latents diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py index 977f2790a3..88d1ce4a46 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py @@ -24,7 +24,7 @@ from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import QwenImageLoraLoaderMixin from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import QwenImagePipelineOutput @@ -421,6 +421,12 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -428,6 +434,12 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -436,6 +448,12 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -443,6 +461,12 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py index b064c40bca..d54d1881fa 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py @@ -25,7 +25,7 @@ from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import QwenImageLoraLoaderMixin from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import QwenImagePipelineOutput @@ -466,6 +466,12 @@ class QwenImageEditInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -473,6 +479,12 @@ class QwenImageEditInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -481,6 +493,12 @@ class QwenImageEditInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -488,6 +506,12 @@ class QwenImageEditInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_inpaint.QwenImageInpaintPipeline.prepare_latents diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py index 8040852e53..cb4c5d8016 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py @@ -9,7 +9,7 @@ from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import QwenImageLoraLoaderMixin from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import QwenImagePipelineOutput @@ -397,6 +397,12 @@ class QwenImageImg2ImgPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -404,6 +410,12 @@ class QwenImageImg2ImgPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -412,6 +424,12 @@ class QwenImageImg2ImgPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -419,6 +437,12 @@ class QwenImageImg2ImgPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py index 4d502569a0..1915c27eb2 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py @@ -10,7 +10,7 @@ from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import QwenImageLoraLoaderMixin from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import QwenImagePipelineOutput @@ -424,6 +424,12 @@ class QwenImageInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -431,6 +437,12 @@ class QwenImageInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -439,6 +451,12 @@ class QwenImageInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -446,6 +464,12 @@ class QwenImageInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def prepare_latents( diff --git a/src/diffusers/pipelines/sana/pipeline_sana.py b/src/diffusers/pipelines/sana/pipeline_sana.py index c54fec5b3a..ac979305ca 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana.py +++ b/src/diffusers/pipelines/sana/pipeline_sana.py @@ -30,6 +30,7 @@ from ...schedulers import DPMSolverMultistepScheduler from ...utils import ( BACKENDS_MAPPING, USE_PEFT_BACKEND, + deprecate, is_bs4_available, is_ftfy_available, is_torch_xla_available, @@ -224,6 +225,12 @@ class SanaPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -231,6 +238,12 @@ class SanaPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -239,6 +252,12 @@ class SanaPipeline(DiffusionPipeline, SanaLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -246,6 +265,12 @@ class SanaPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def _get_gemma_prompt_embeds( diff --git a/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py b/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py index 17d6dfd83e..55ed7b84eb 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py @@ -30,6 +30,7 @@ from ...schedulers import DPMSolverMultistepScheduler from ...utils import ( BACKENDS_MAPPING, USE_PEFT_BACKEND, + deprecate, is_bs4_available, is_ftfy_available, is_torch_xla_available, @@ -237,6 +238,12 @@ class SanaControlNetPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. 
This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -244,6 +251,12 @@ class SanaControlNetPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -252,6 +265,12 @@ class SanaControlNetPipeline(DiffusionPipeline, SanaLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -259,6 +278,12 @@ class SanaControlNetPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.sana.pipeline_sana.SanaPipeline._get_gemma_prompt_embeds diff --git a/src/diffusers/pipelines/sana/pipeline_sana_sprint.py b/src/diffusers/pipelines/sana/pipeline_sana_sprint.py index a140cc1672..62b9788292 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_sprint.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_sprint.py @@ -30,6 +30,7 @@ from ...schedulers import DPMSolverMultistepScheduler from ...utils import ( BACKENDS_MAPPING, USE_PEFT_BACKEND, + deprecate, is_bs4_available, is_ftfy_available, is_torch_xla_available, @@ -175,6 +176,12 @@ class SanaSprintPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -182,6 +189,12 @@ class SanaSprintPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. 
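# As the docstrings above describe, slicing decodes a batch one latent at a time and
# tiling decodes one spatial tile at a time, trading a little speed for lower peak
# memory. A minimal sketch against the VAE alone; the checkpoint id, CUDA device, and
# latent shape are assumptions chosen purely for illustration:
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(
    "stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16
).to("cuda")
vae.enable_slicing()  # split the batch dimension during decode
vae.enable_tiling()   # split height/width into tiles during decode/encode

latents = torch.randn(4, 4, 128, 128, dtype=torch.float16, device="cuda")
with torch.no_grad():
    images = vae.decode(latents / vae.config.scaling_factor).sample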
""" + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -190,6 +203,12 @@ class SanaSprintPipeline(DiffusionPipeline, SanaLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -197,6 +216,12 @@ class SanaSprintPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.sana.pipeline_sana.SanaPipeline._get_gemma_prompt_embeds diff --git a/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py b/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py index 34d3b9d17e..8899ed84c4 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py @@ -31,6 +31,7 @@ from ...schedulers import DPMSolverMultistepScheduler from ...utils import ( BACKENDS_MAPPING, USE_PEFT_BACKEND, + deprecate, is_bs4_available, is_ftfy_available, is_torch_xla_available, @@ -183,6 +184,12 @@ class SanaSprintImg2ImgPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() # Copied from diffusers.pipelines.sana.pipeline_sana.SanaPipeline.disable_vae_slicing @@ -191,6 +198,12 @@ class SanaSprintImg2ImgPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() # Copied from diffusers.pipelines.sana.pipeline_sana.SanaPipeline.enable_vae_tiling @@ -200,6 +213,12 @@ class SanaSprintImg2ImgPipeline(DiffusionPipeline, SanaLoraLoaderMixin): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. 
""" + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -207,6 +226,12 @@ class SanaSprintImg2ImgPipeline(DiffusionPipeline, SanaLoraLoaderMixin): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Copied from diffusers.pipelines.sana.pipeline_sana.SanaPipeline._get_gemma_prompt_embeds diff --git a/src/diffusers/pipelines/stable_audio/pipeline_stable_audio.py b/src/diffusers/pipelines/stable_audio/pipeline_stable_audio.py index 8861ecae7d..b7faf097ab 100644 --- a/src/diffusers/pipelines/stable_audio/pipeline_stable_audio.py +++ b/src/diffusers/pipelines/stable_audio/pipeline_stable_audio.py @@ -25,11 +25,7 @@ from transformers import ( from ...models import AutoencoderOobleck, StableAudioDiTModel from ...models.embeddings import get_1d_rotary_pos_embed from ...schedulers import EDMDPMSolverMultistepScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .modeling_stable_audio import StableAudioProjectionModel @@ -134,6 +130,12 @@ class StableAudioPipeline(DiffusionPipeline): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing @@ -142,6 +144,12 @@ class StableAudioPipeline(DiffusionPipeline): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def encode_prompt( diff --git a/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py b/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py index 40fd3b3373..f9298d5b86 100644 --- a/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +++ b/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py @@ -232,6 +232,12 @@ class UniDiffuserPipeline(DeprecatedPipelineMixin, DiffusionPipeline): Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. 
This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing @@ -240,6 +246,12 @@ class UniDiffuserPipeline(DeprecatedPipelineMixin, DiffusionPipeline): Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_tiling @@ -249,6 +261,12 @@ class UniDiffuserPipeline(DeprecatedPipelineMixin, DiffusionPipeline): compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_tiling @@ -257,6 +275,12 @@ class UniDiffuserPipeline(DeprecatedPipelineMixin, DiffusionPipeline): Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." 
+ deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() # Functions to manually set the mode diff --git a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py index 4e5b32c10c..91a54e1ae8 100644 --- a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py +++ b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py @@ -22,11 +22,7 @@ from ...loaders import FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversio from ...models.autoencoders import AutoencoderKL from ...models.transformers import FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler -from ...utils import ( - is_torch_xla_available, - logging, - replace_example_docstring, -) +from ...utils import is_torch_xla_available, logging, replace_example_docstring from ..flux.pipeline_flux_fill import FluxFillPipeline as VisualClozeUpsamplingPipeline from ..flux.pipeline_output import FluxPipelineOutput from ..pipeline_utils import DiffusionPipeline diff --git a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py index 8571211cd0..e12995106b 100644 --- a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py +++ b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py @@ -24,6 +24,7 @@ from ...models.transformers import FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, + deprecate, is_torch_xla_available, logging, replace_example_docstring, @@ -524,6 +525,12 @@ class VisualClozeGenerationPipeline( Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ + depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." + deprecate( + "enable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.enable_slicing() def disable_vae_slicing(self): @@ -531,6 +538,12 @@ class VisualClozeGenerationPipeline( Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ + depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." + deprecate( + "disable_vae_slicing", + "0.40.0", + depr_message, + ) self.vae.disable_slicing() def enable_vae_tiling(self): @@ -539,6 +552,12 @@ class VisualClozeGenerationPipeline( compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ + depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." + deprecate( + "enable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.enable_tiling() def disable_vae_tiling(self): @@ -546,6 +565,12 @@ class VisualClozeGenerationPipeline( Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. 
""" + depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." + deprecate( + "disable_vae_tiling", + "0.40.0", + depr_message, + ) self.vae.disable_tiling() def _prepare_latents(self, image, mask, gen, vae_scale_factor, device, dtype): From f5c113e4395bc373ab540fc5a1f7490b7120c40f Mon Sep 17 00:00:00 2001 From: Daniel Socek Date: Fri, 12 Sep 2025 11:00:36 -0700 Subject: [PATCH 64/74] Use SDP on BF16 in GPU/HPU migration (#12310) * Use SDP on BF16 in GPU/HPU migration Signed-off-by: Daniel Socek * Formatting fix for enabling SDP with BF16 precision on HPU Signed-off-by: Daniel Socek --------- Signed-off-by: Daniel Socek --- src/diffusers/pipelines/pipeline_utils.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index ce0dbe21c8..01b3c56777 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -505,6 +505,13 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin): os.environ["PT_HPU_MAX_COMPOUND_OP_SIZE"] = "1" logger.debug("Environment variable set: PT_HPU_MAX_COMPOUND_OP_SIZE=1") + if dtype in (torch.bfloat16, None) and kwargs.pop("sdp_on_bf16", True): + if hasattr(torch._C, "_set_math_sdp_allow_fp16_bf16_reduction"): + torch._C._set_math_sdp_allow_fp16_bf16_reduction(True) + logger.warning( + "Enabled SDP with BF16 precision on HPU. To disable, please use `.to('hpu', sdp_on_bf16=False)`" + ) + module_names, _ = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module)] From b50014067d7d31ef09d6dad62175bc19c2e258bf Mon Sep 17 00:00:00 2001 From: Linoy Tsaban <57615435+linoytsaban@users.noreply.github.com> Date: Mon, 15 Sep 2025 18:01:26 +0200 Subject: [PATCH 65/74] Add Wan2.2 VACE - Fun (#12324) * support Wan2.2-VACE-Fun-A14B * support Wan2.2-VACE-Fun-A14B * support Wan2.2-VACE-Fun-A14B * Apply style fixes * test --------- Co-authored-by: github-actions[bot] --- scripts/convert_wan_to_diffusers.py | 35 ++++++++- .../pipelines/wan/pipeline_wan_vace.py | 75 +++++++++++++++---- tests/pipelines/wan/test_wan_vace.py | 1 + 3 files changed, 94 insertions(+), 17 deletions(-) diff --git a/scripts/convert_wan_to_diffusers.py b/scripts/convert_wan_to_diffusers.py index 599c90be57..39a364b07d 100644 --- a/scripts/convert_wan_to_diffusers.py +++ b/scripts/convert_wan_to_diffusers.py @@ -278,6 +278,29 @@ def get_transformer_config(model_type: str) -> Tuple[Dict[str, Any], ...]: } RENAME_DICT = VACE_TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = VACE_TRANSFORMER_SPECIAL_KEYS_REMAP + elif model_type == "Wan2.2-VACE-Fun-14B": + config = { + "model_id": "alibaba-pai/Wan2.2-VACE-Fun-A14B", + "diffusers_config": { + "added_kv_proj_dim": None, + "attention_head_dim": 128, + "cross_attn_norm": True, + "eps": 1e-06, + "ffn_dim": 13824, + "freq_dim": 256, + "in_channels": 16, + "num_attention_heads": 40, + "num_layers": 40, + "out_channels": 16, + "patch_size": [1, 2, 2], + "qk_norm": "rms_norm_across_heads", + "text_dim": 4096, + "vace_layers": [0, 5, 10, 15, 20, 25, 30, 35], + "vace_in_channels": 96, + }, + } + RENAME_DICT = VACE_TRANSFORMER_KEYS_RENAME_DICT + SPECIAL_KEYS_REMAP = VACE_TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan2.2-I2V-14B-720p": config = { "model_id": "Wan-AI/Wan2.2-I2V-A14B", @@ -975,7 +998,17 @@ if __name__ == 
"__main__": image_encoder=image_encoder, image_processor=image_processor, ) - elif "VACE" in args.model_type: + elif "Wan2.2-VACE" in args.model_type: + pipe = WanVACEPipeline( + transformer=transformer, + transformer_2=transformer_2, + text_encoder=text_encoder, + tokenizer=tokenizer, + vae=vae, + scheduler=scheduler, + boundary_ratio=0.875, + ) + elif "Wan-VACE" in args.model_type: pipe = WanVACEPipeline( transformer=transformer, text_encoder=text_encoder, diff --git a/src/diffusers/pipelines/wan/pipeline_wan_vace.py b/src/diffusers/pipelines/wan/pipeline_wan_vace.py index 99e1f5116b..eab1aacfc5 100644 --- a/src/diffusers/pipelines/wan/pipeline_wan_vace.py +++ b/src/diffusers/pipelines/wan/pipeline_wan_vace.py @@ -152,16 +152,26 @@ class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. - transformer ([`WanTransformer3DModel`]): + transformer ([`WanVACETransformer3DModel`]): Conditional Transformer to denoise the input latents. + transformer_2 ([`WanVACETransformer3DModel`], *optional*): + Conditional Transformer to denoise the input latents during the low-noise stage. In two-stage denoising, + `transformer` handles high-noise stages and `transformer_2` handles low-noise stages. If not provided, only + `transformer` is used. scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. + boundary_ratio (`float`, *optional*, defaults to `None`): + Ratio of total timesteps to use as the boundary for switching between transformers in two-stage denoising. + The actual boundary timestep is calculated as `boundary_ratio * num_train_timesteps`. When provided, + `transformer` handles timesteps >= boundary_timestep and `transformer_2` handles timesteps < + boundary_timestep. If `None`, only `transformer` is used for the entire denoising process. 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + _optional_components = ["transformer_2"] def __init__( self, @@ -170,6 +180,8 @@ class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): transformer: WanVACETransformer3DModel, vae: AutoencoderKLWan, scheduler: FlowMatchEulerDiscreteScheduler, + transformer_2: WanVACETransformer3DModel = None, + boundary_ratio: Optional[float] = None, ): super().__init__() @@ -178,9 +190,10 @@ class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, + transformer_2=transformer_2, scheduler=scheduler, ) - + self.register_to_config(boundary_ratio=boundary_ratio) self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) @@ -321,6 +334,7 @@ class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): video=None, mask=None, reference_images=None, + guidance_scale_2=None, ): base = self.vae_scale_factor_spatial * self.transformer.config.patch_size[1] if height % base != 0 or width % base != 0: @@ -332,6 +346,8 @@ class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) + if self.config.boundary_ratio is None and guidance_scale_2 is not None: + raise ValueError("`guidance_scale_2` is only supported when the pipeline's `boundary_ratio` is not None.") if prompt is not None and prompt_embeds is not None: raise ValueError( @@ -667,6 +683,7 @@ class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): num_frames: int = 81, num_inference_steps: int = 50, guidance_scale: float = 5.0, + guidance_scale_2: Optional[float] = None, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, @@ -728,6 +745,10 @@ class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. + guidance_scale_2 (`float`, *optional*, defaults to `None`): + Guidance scale for the low-noise stage transformer (`transformer_2`). If `None` and the pipeline's + `boundary_ratio` is not None, uses the same value as `guidance_scale`. Only used when `transformer_2` + and the pipeline's `boundary_ratio` are not None. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -793,6 +814,7 @@ class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): video, mask, reference_images, + guidance_scale_2, ) if num_frames % self.vae_scale_factor_temporal != 1: @@ -802,7 +824,11 @@ class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1 num_frames = max(num_frames, 1) + if self.config.boundary_ratio is not None and guidance_scale_2 is None: + guidance_scale_2 = guidance_scale + self._guidance_scale = guidance_scale + self._guidance_scale_2 = guidance_scale_2 self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False @@ -896,36 +922,53 @@ class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) + if self.config.boundary_ratio is not None: + boundary_timestep = self.config.boundary_ratio * self.scheduler.config.num_train_timesteps + else: + boundary_timestep = None + with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t + + if boundary_timestep is None or t >= boundary_timestep: + # wan2.1 or high-noise stage in wan2.2 + current_model = self.transformer + current_guidance_scale = guidance_scale + else: + # low-noise stage in wan2.2 + current_model = self.transformer_2 + current_guidance_scale = guidance_scale_2 + latent_model_input = latents.to(transformer_dtype) timestep = t.expand(latents.shape[0]) - noise_pred = self.transformer( - hidden_states=latent_model_input, - timestep=timestep, - encoder_hidden_states=prompt_embeds, - control_hidden_states=conditioning_latents, - control_hidden_states_scale=conditioning_scale, - attention_kwargs=attention_kwargs, - return_dict=False, - )[0] - - if self.do_classifier_free_guidance: - noise_uncond = self.transformer( + with current_model.cache_context("cond"): + noise_pred = current_model( hidden_states=latent_model_input, timestep=timestep, - encoder_hidden_states=negative_prompt_embeds, + encoder_hidden_states=prompt_embeds, control_hidden_states=conditioning_latents, control_hidden_states_scale=conditioning_scale, attention_kwargs=attention_kwargs, return_dict=False, )[0] - noise_pred = noise_uncond + guidance_scale * (noise_pred - noise_uncond) + + if self.do_classifier_free_guidance: + with current_model.cache_context("uncond"): + noise_uncond = current_model( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=negative_prompt_embeds, + control_hidden_states=conditioning_latents, + control_hidden_states_scale=conditioning_scale, + attention_kwargs=attention_kwargs, + return_dict=False, + )[0] + noise_pred = noise_uncond + guidance_scale * (noise_pred - noise_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] diff --git a/tests/pipelines/wan/test_wan_vace.py b/tests/pipelines/wan/test_wan_vace.py index ed13d5649d..f99863c880 100644 --- a/tests/pipelines/wan/test_wan_vace.py +++ b/tests/pipelines/wan/test_wan_vace.py @@ -87,6 +87,7 @@ class WanVACEPipelineFastTests(PipelineTesterMixin, unittest.TestCase): "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, + "transformer_2": None, } return components From 751e250f70cf446ae342c8a860d92f6a8b78261a Mon Sep 17 
00:00:00 2001 From: Samarth Agrawal <41808786+SammyAgrawal@users.noreply.github.com> Date: Mon, 15 Sep 2025 23:48:48 -0700 Subject: [PATCH 66/74] fixed bug in defining embed dim for UNet1D (#12111) * fixed bug in defining embed dim * matched 1d temb process to 2d * Update src/diffusers/models/unets/unet_1d.py Co-authored-by: Dhruv Nair --------- Co-authored-by: Dhruv Nair --- src/diffusers/models/unets/unet_1d.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/diffusers/models/unets/unet_1d.py b/src/diffusers/models/unets/unet_1d.py index 4f57f3349b..4c4c528a59 100644 --- a/src/diffusers/models/unets/unet_1d.py +++ b/src/diffusers/models/unets/unet_1d.py @@ -82,6 +82,7 @@ class UNet1DModel(ModelMixin, ConfigMixin): out_channels: int = 2, extra_in_channels: int = 0, time_embedding_type: str = "fourier", + time_embedding_dim: Optional[int] = None, flip_sin_to_cos: bool = True, use_timestep_embedding: bool = False, freq_shift: float = 0.0, @@ -100,15 +101,23 @@ class UNet1DModel(ModelMixin, ConfigMixin): # time if time_embedding_type == "fourier": + time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 + if time_embed_dim % 2 != 0: + raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( - embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos + embedding_size=time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) - timestep_input_dim = 2 * block_out_channels[0] + timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": + time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps( block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift ) timestep_input_dim = block_out_channels[0] + else: + raise ValueError( + f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." 
+ ) if use_timestep_embedding: time_embed_dim = block_out_channels[0] * 4 From 8c72cd12ee65e420c86a0724f0182f966f339a7e Mon Sep 17 00:00:00 2001 From: Sari Hleihil <57064636+sarihl@users.noreply.github.com> Date: Wed, 17 Sep 2025 02:41:05 +0300 Subject: [PATCH 67/74] Added LucyEditPipeline (#12340) * Added LucyEditPipeline * add import & stype missing copied from * Fix example doc string --------- Co-authored-by: yiyixuxu --- src/diffusers/__init__.py | 2 + src/diffusers/pipelines/__init__.py | 2 + src/diffusers/pipelines/lucy/__init__.py | 47 ++ .../pipelines/lucy/pipeline_lucy_edit.py | 735 ++++++++++++++++++ .../pipelines/lucy/pipeline_output.py | 20 + .../dummy_torch_and_transformers_objects.py | 15 + 6 files changed, 821 insertions(+) create mode 100644 src/diffusers/pipelines/lucy/__init__.py create mode 100644 src/diffusers/pipelines/lucy/pipeline_lucy_edit.py create mode 100644 src/diffusers/pipelines/lucy/pipeline_output.py diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index d96acc3818..167d39c6e8 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -495,6 +495,7 @@ else: "LTXImageToVideoPipeline", "LTXLatentUpsamplePipeline", "LTXPipeline", + "LucyEditPipeline", "Lumina2Pipeline", "Lumina2Text2ImgPipeline", "LuminaPipeline", @@ -1149,6 +1150,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: LTXImageToVideoPipeline, LTXLatentUpsamplePipeline, LTXPipeline, + LucyEditPipeline, Lumina2Pipeline, Lumina2Text2ImgPipeline, LuminaPipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 8ed07a72e3..17f3fc909e 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -285,6 +285,7 @@ else: ] _import_structure["lumina"] = ["LuminaPipeline", "LuminaText2ImgPipeline"] _import_structure["lumina2"] = ["Lumina2Pipeline", "Lumina2Text2ImgPipeline"] + _import_structure["lucy"] = ["LucyEditPipeline"] _import_structure["marigold"].extend( [ "MarigoldDepthPipeline", @@ -682,6 +683,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: LEditsPPPipelineStableDiffusionXL, ) from .ltx import LTXConditionPipeline, LTXImageToVideoPipeline, LTXLatentUpsamplePipeline, LTXPipeline + from .lucy import LucyEditPipeline from .lumina import LuminaPipeline, LuminaText2ImgPipeline from .lumina2 import Lumina2Pipeline, Lumina2Text2ImgPipeline from .marigold import ( diff --git a/src/diffusers/pipelines/lucy/__init__.py b/src/diffusers/pipelines/lucy/__init__.py new file mode 100644 index 0000000000..580e1f37f3 --- /dev/null +++ b/src/diffusers/pipelines/lucy/__init__.py @@ -0,0 +1,47 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_lucy_edit"] = ["LucyEditPipeline"] +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from 
.pipeline_lucy_edit import LucyEditPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/src/diffusers/pipelines/lucy/pipeline_lucy_edit.py b/src/diffusers/pipelines/lucy/pipeline_lucy_edit.py new file mode 100644 index 0000000000..69f69d5768 --- /dev/null +++ b/src/diffusers/pipelines/lucy/pipeline_lucy_edit.py @@ -0,0 +1,735 @@ +# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved. +# Copyright 2025 The Decart AI Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Modifications by Decart AI Team: +# - Based on pipeline_wan.py, but with supports recieving a condition video appended to the channel dimension. + +import html +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import regex as re +import torch +from PIL import Image +from transformers import AutoTokenizer, UMT5EncoderModel + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...loaders import WanLoraLoaderMixin +from ...models import AutoencoderKLWan, WanTransformer3DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import LucyPipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_ftfy_available(): + import ftfy + + +EXAMPLE_DOC_STRING = """ + Examples: + ```python + >>> from typing import List + + >>> import torch + >>> from PIL import Image + + >>> from diffusers import AutoencoderKLWan, LucyEditPipeline + >>> from diffusers.utils import export_to_video, load_video + + >>> # Arguments + >>> url = "https://d2drjpuinn46lb.cloudfront.net/painter_original_edit.mp4" + >>> prompt = "Change the apron and blouse to a classic clown costume: satin polka-dot jumpsuit in bright primary colors, ruffled white collar, oversized pom-pom buttons, white gloves, oversized red shoes, red foam nose; soft window light from left, eye-level medium shot, natural folds and fabric highlights." + >>> negative_prompt = "" + >>> num_frames = 81 + >>> height = 480 + >>> width = 832 + + + >>> # Load video + >>> def convert_video(video: List[Image.Image]) -> List[Image.Image]: + ... video = load_video(url)[:num_frames] + ... video = [video[i].resize((width, height)) for i in range(num_frames)] + ... 
return video + + + >>> video = load_video(url, convert_method=convert_video) + + >>> # Load model + >>> model_id = "decart-ai/Lucy-Edit-Dev" + >>> vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32) + >>> pipe = LucyEditPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16) + >>> pipe.to("cuda") + + >>> # Generate video + >>> output = pipe( + ... prompt=prompt, + ... video=video, + ... negative_prompt=negative_prompt, + ... height=480, + ... width=832, + ... num_frames=81, + ... guidance_scale=5.0, + ... ).frames[0] + + >>> # Export video + >>> export_to_video(output, "output.mp4", fps=24) + ``` +""" + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r"\s+", " ", text) + text = text.strip() + return text + + +def prompt_clean(text): + text = whitespace_clean(basic_clean(text)) + return text + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class LucyEditPipeline(DiffusionPipeline, WanLoraLoaderMixin): + r""" + Pipeline for video-to-video generation using Lucy Edit. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + tokenizer ([`T5Tokenizer`]): + Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer), + specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. + text_encoder ([`T5EncoderModel`]): + [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically + the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. + transformer ([`WanTransformer3DModel`]): + Conditional Transformer to denoise the input latents. + scheduler ([`UniPCMultistepScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKLWan`]): + Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. + transformer_2 ([`WanTransformer3DModel`], *optional*): + Conditional Transformer to denoise the input latents during the low-noise stage. If provided, enables + two-stage denoising where `transformer` handles high-noise stages and `transformer_2` handles low-noise + stages. If not provided, only `transformer` is used. + boundary_ratio (`float`, *optional*, defaults to `None`): + Ratio of total timesteps to use as the boundary for switching between transformers in two-stage denoising. + The actual boundary timestep is calculated as `boundary_ratio * num_train_timesteps`. When provided, + `transformer` handles timesteps >= boundary_timestep and `transformer_2` handles timesteps < + boundary_timestep. 
If `None`, only `transformer` is used for the entire denoising process. + """ + + model_cpu_offload_seq = "text_encoder->transformer->transformer_2->vae" + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + _optional_components = ["transformer", "transformer_2"] + + def __init__( + self, + tokenizer: AutoTokenizer, + text_encoder: UMT5EncoderModel, + vae: AutoencoderKLWan, + scheduler: FlowMatchEulerDiscreteScheduler, + transformer: Optional[WanTransformer3DModel] = None, + transformer_2: Optional[WanTransformer3DModel] = None, + boundary_ratio: Optional[float] = None, + expand_timesteps: bool = False, # Wan2.2 ti2v + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + transformer=transformer, + scheduler=scheduler, + transformer_2=transformer_2, + ) + self.register_to_config(boundary_ratio=boundary_ratio) + self.register_to_config(expand_timesteps=expand_timesteps) + self.vae_scale_factor_temporal = self.vae.config.scale_factor_temporal if getattr(self, "vae", None) else 4 + self.vae_scale_factor_spatial = self.vae.config.scale_factor_spatial if getattr(self, "vae", None) else 8 + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) + + # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline._get_t5_prompt_embeds + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_videos_per_prompt: int = 1, + max_sequence_length: int = 226, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + prompt = [prompt_clean(u) for u in prompt] + batch_size = len(prompt) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + add_special_tokens=True, + return_attention_mask=True, + return_tensors="pt", + ) + text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask + seq_lens = mask.gt(0).sum(dim=1).long() + + prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)] + prompt_embeds = torch.stack( + [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0 + ) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + _, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) + + return prompt_embeds + + # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + do_classifier_free_guidance: bool = True, + num_videos_per_prompt: int = 1, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + max_sequence_length: int = 226, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
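The `_get_t5_prompt_embeds` helper above trims each prompt's embeddings to its true token length and then zero-pads back to `max_sequence_length` so the batch can be stacked. An isolated, runnable sketch of that step with toy tensors (4096 is the UMT5-XXL hidden size):

```python
import torch

max_sequence_length = 226
hidden_dim = 4096

# Toy per-prompt embeddings, already trimmed to their true lengths (12 and 30 tokens).
prompt_embeds = [torch.randn(12, hidden_dim), torch.randn(30, hidden_dim)]

padded = torch.stack(
    [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds],
    dim=0,
)
print(padded.shape)  # torch.Size([2, 226, 4096])
```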
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + Whether to use classifier free guidance or not. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + Number of videos that should be generated per prompt. torch device to place the resulting embeddings on + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + device: (`torch.device`, *optional*): + torch device + dtype: (`torch.dtype`, *optional*): + torch dtype + """ + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_embeds = self._get_t5_prompt_embeds( + prompt=prompt, + num_videos_per_prompt=num_videos_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + dtype=dtype, + ) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + + negative_prompt_embeds = self._get_t5_prompt_embeds( + prompt=negative_prompt, + num_videos_per_prompt=num_videos_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + dtype=dtype, + ) + + return prompt_embeds, negative_prompt_embeds + + def check_inputs( + self, + video, + prompt, + negative_prompt, + height, + width, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + guidance_scale_2=None, + ): + if height % 16 != 0 or width % 16 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" + " only forward one of the two." + ) + elif negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif negative_prompt is not None and ( + not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) + ): + raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") + + if self.config.boundary_ratio is None and guidance_scale_2 is not None: + raise ValueError("`guidance_scale_2` is only supported when the pipeline's `boundary_ratio` is not None.") + + if video is None: + raise ValueError("`video` is required, received None.") + + def prepare_latents( + self, + video: Optional[torch.Tensor] = None, + batch_size: int = 1, + num_channels_latents: int = 16, + height: int = 480, + width: int = 832, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + generator: Optional[torch.Generator] = None, + latents: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + num_latent_frames = ( + (video.size(2) - 1) // self.vae_scale_factor_temporal + 1 if latents is None else latents.size(1) + ) + shape = ( + batch_size, + num_channels_latents, + num_latent_frames, + height // self.vae_scale_factor_spatial, + width // self.vae_scale_factor_spatial, + ) + # Prepare noise latents + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # Prepare condition latents + condition_latents = [ + retrieve_latents(self.vae.encode(vid.unsqueeze(0)), sample_mode="argmax") for vid in video + ] + + condition_latents = torch.cat(condition_latents, dim=0).to(dtype) + + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, self.vae.config.z_dim, 1, 1, 1).to(device, dtype) + ) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( + device, dtype + ) + + condition_latents = (condition_latents - latents_mean) * latents_std + + # Check shapes + assert latents.shape == condition_latents.shape, ( + f"Latents shape {latents.shape} does not match expected shape {condition_latents.shape}. Please check the input." 
+ ) + + return latents, condition_latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1.0 + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def current_timestep(self): + return self._current_timestep + + @property + def interrupt(self): + return self._interrupt + + @property + def attention_kwargs(self): + return self._attention_kwargs + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + video: List[Image.Image], + prompt: Union[str, List[str]] = None, + negative_prompt: Union[str, List[str]] = None, + height: int = 480, + width: int = 832, + num_frames: int = 81, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + guidance_scale_2: Optional[float] = None, + num_videos_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "np", + return_dict: bool = True, + attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + ): + r""" + The call function to the pipeline for generation. + + Args: + video (`List[Image.Image]`): + The video to use as the condition for the video generation. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, pass `prompt_embeds` instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to avoid during image generation. If not defined, pass `negative_prompt_embeds` + instead. Ignored when not using guidance (`guidance_scale` < `1`). + height (`int`, defaults to `480`): + The height in pixels of the generated image. + width (`int`, defaults to `832`): + The width in pixels of the generated image. + num_frames (`int`, defaults to `81`): + The number of frames in the generated video. + num_inference_steps (`int`, defaults to `50`): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, defaults to `5.0`): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. + of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to + the text `prompt`, usually at the expense of lower image quality. + guidance_scale_2 (`float`, *optional*, defaults to `None`): + Guidance scale for the low-noise stage transformer (`transformer_2`). If `None` and the pipeline's + `boundary_ratio` is not None, uses the same value as `guidance_scale`. Only used when `transformer_2` + and the pipeline's `boundary_ratio` are not None. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
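A compressed sketch of what `prepare_latents` above does with the condition video: each clip is encoded deterministically (the argmax of the VAE's latent distribution, i.e. `retrieve_latents(..., sample_mode="argmax")`) and then shifted/scaled with the VAE's latent statistics so that it matches the fresh noise latents it is later concatenated with along the channel dimension. Illustrative only; `vae` is assumed to be a loaded `AutoencoderKLWan` and `video` a preprocessed `(batch, channels, frames, height, width)` tensor:

```python
import torch

# `vae` (AutoencoderKLWan) and `video` (preprocessed float tensor) are assumed to exist.
condition_latents = torch.cat(
    [vae.encode(clip.unsqueeze(0)).latent_dist.mode() for clip in video], dim=0
)

# Normalize into the transformer's latent space using the VAE statistics.
latents_mean = torch.tensor(vae.config.latents_mean).view(1, vae.config.z_dim, 1, 1, 1)
latents_std = 1.0 / torch.tensor(vae.config.latents_std).view(1, vae.config.z_dim, 1, 1, 1)
condition_latents = (condition_latents - latents_mean) * latents_std

# The noise latents prepared alongside must share this shape: the two tensors are
# concatenated on the channel dimension (dim=1) before every transformer call.
```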
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`LucyPipelineOutput`] instead of a plain tuple. + attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int`, defaults to `512`): + The maximum sequence length of the text encoder. If the prompt is longer than this, it will be + truncated. If the prompt is shorter, it will be padded to this length. + + Examples: + + Returns: + [`~LucyPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`LucyPipelineOutput`] is returned, otherwise a `tuple` is returned where + the first element is a list with the generated images and the second element is a list of `bool`s + indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + video, + prompt, + negative_prompt, + height, + width, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + guidance_scale_2, + ) + + if num_frames % self.vae_scale_factor_temporal != 1: + logger.warning( + f"`num_frames - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the nearest number." 
+ ) + num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1 + num_frames = max(num_frames, 1) + + if self.config.boundary_ratio is not None and guidance_scale_2 is None: + guidance_scale_2 = guidance_scale + + self._guidance_scale = guidance_scale + self._guidance_scale_2 = guidance_scale_2 + self._attention_kwargs = attention_kwargs + self._current_timestep = None + self._interrupt = False + + device = self._execution_device + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt=prompt, + negative_prompt=negative_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + num_videos_per_prompt=num_videos_per_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + max_sequence_length=max_sequence_length, + device=device, + ) + + transformer_dtype = self.transformer.dtype if self.transformer is not None else self.transformer_2.dtype + prompt_embeds = prompt_embeds.to(transformer_dtype) + if negative_prompt_embeds is not None: + negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = ( + self.transformer.config.out_channels + if self.transformer is not None + else self.transformer_2.config.out_channels + ) + video = self.video_processor.preprocess_video(video, height=height, width=width).to( + device, dtype=torch.float32 + ) + latents, condition_latents = self.prepare_latents( + video, + batch_size * num_videos_per_prompt, + num_channels_latents, + height, + width, + torch.float32, + device, + generator, + latents, + ) + + mask = torch.ones(latents.shape, dtype=torch.float32, device=device) + + # 6. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + + if self.config.boundary_ratio is not None: + boundary_timestep = self.config.boundary_ratio * self.scheduler.config.num_train_timesteps + else: + boundary_timestep = None + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + self._current_timestep = t + + if boundary_timestep is None or t >= boundary_timestep: + # wan2.1 or high-noise stage in wan2.2 + current_model = self.transformer + current_guidance_scale = guidance_scale + else: + # low-noise stage in wan2.2 + current_model = self.transformer_2 + current_guidance_scale = guidance_scale_2 + + # latent_model_input = latents.to(transformer_dtype) + latent_model_input = torch.cat([latents, condition_latents], dim=1).to(transformer_dtype) + # latent_model_input = torch.cat([latents, latents], dim=1).to(transformer_dtype) + if self.config.expand_timesteps: + # seq_len: num_latent_frames * latent_height//2 * latent_width//2 + temp_ts = (mask[0][0][:, ::2, ::2] * t).flatten() + # batch_size, seq_len + timestep = temp_ts.unsqueeze(0).expand(latents.shape[0], -1) + else: + timestep = t.expand(latents.shape[0]) + + with current_model.cache_context("cond"): + noise_pred = current_model( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + attention_kwargs=attention_kwargs, + return_dict=False, + )[0] + + if self.do_classifier_free_guidance: + with current_model.cache_context("uncond"): + noise_uncond = current_model( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=negative_prompt_embeds, + attention_kwargs=attention_kwargs, + return_dict=False, + )[0] + noise_pred = noise_uncond + current_guidance_scale * (noise_pred - noise_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + self._current_timestep = None + + if not output_type == "latent": + latents = latents.to(self.vae.dtype) + latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, self.vae.config.z_dim, 1, 1, 1) + .to(latents.device, latents.dtype) + ) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( + latents.device, latents.dtype + ) + latents = latents / latents_std + latents_mean + video = self.vae.decode(latents, return_dict=False)[0] + video = self.video_processor.postprocess_video(video, output_type=output_type) + else: + video = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return LucyPipelineOutput(frames=video) diff --git a/src/diffusers/pipelines/lucy/pipeline_output.py b/src/diffusers/pipelines/lucy/pipeline_output.py new 
file mode 100644 index 0000000000..cf9ea91fd1 --- /dev/null +++ b/src/diffusers/pipelines/lucy/pipeline_output.py @@ -0,0 +1,20 @@ +from dataclasses import dataclass + +import torch + +from diffusers.utils import BaseOutput + + +@dataclass +class LucyPipelineOutput(BaseOutput): + r""" + Output class for Lucy pipelines. + + Args: + frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): + List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing + denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape + `(batch_size, num_frames, channels, height, width)`. + """ + + frames: torch.Tensor diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 00792fa55a..e29be174f0 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -1592,6 +1592,21 @@ class LTXPipeline(metaclass=DummyObject): requires_backends(cls, ["torch", "transformers"]) +class LucyEditPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class Lumina2Pipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] From d06750a5fd19781de68066bb34a3520af83cf124 Mon Sep 17 00:00:00 2001 From: Zijian Zhou Date: Wed, 17 Sep 2025 00:43:15 +0100 Subject: [PATCH 68/74] Fix autoencoder_kl_wan.py bugs for Wan2.2 VAE (#12335) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update autoencoder_kl_wan.py When using the Wan2.2 VAE, the spatial compression ratio calculated here is incorrect. It should be 16 instead of 8. Pass it in directly via the config to ensure it’s correct here. * Update autoencoder_kl_wan.py --- src/diffusers/models/autoencoders/autoencoder_kl_wan.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/diffusers/models/autoencoders/autoencoder_kl_wan.py b/src/diffusers/models/autoencoders/autoencoder_kl_wan.py index d84a0861e9..e6e58c1cce 100644 --- a/src/diffusers/models/autoencoders/autoencoder_kl_wan.py +++ b/src/diffusers/models/autoencoders/autoencoder_kl_wan.py @@ -1052,7 +1052,7 @@ class AutoencoderKLWan(ModelMixin, ConfigMixin, FromOriginalModelMixin): is_residual=is_residual, ) - self.spatial_compression_ratio = 2 ** len(self.temperal_downsample) + self.spatial_compression_ratio = scale_factor_spatial # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension # to perform decoding of a single video latent at a time. 
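The first hunk above replaces the derived ratio `2 ** len(self.temperal_downsample)` (always 8) with the configured `scale_factor_spatial`, which is what the Wan2.2 VAE actually uses (16). A quick illustration; the checkpoint id is an assumption for the example, not part of the patch:

```python
from diffusers import AutoencoderKLWan

# Illustrative checkpoint id for a Wan2.2 VAE.
vae = AutoencoderKLWan.from_pretrained("Wan-AI/Wan2.2-TI2V-5B-Diffusers", subfolder="vae")

# Before the fix: hard-wired to 2 ** len(temperal_downsample) == 8.
# After the fix: read from the config, e.g. 16 for the Wan2.2 VAE.
print(vae.spatial_compression_ratio)
```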
@@ -1145,12 +1145,13 @@ class AutoencoderKLWan(ModelMixin, ConfigMixin, FromOriginalModelMixin): def _encode(self, x: torch.Tensor): _, _, num_frame, height, width = x.shape - if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height): - return self.tiled_encode(x) - self.clear_cache() if self.config.patch_size is not None: x = patchify(x, patch_size=self.config.patch_size) + + if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height): + return self.tiled_encode(x) + iter_ = 1 + (num_frame - 1) // 4 for i in range(iter_): self._enc_conv_idx = [0] From efb7a299af46d739dec6a57a5d2814165fba24b5 Mon Sep 17 00:00:00 2001 From: DefTruth <31974251+DefTruth@users.noreply.github.com> Date: Wed, 17 Sep 2025 12:52:15 +0800 Subject: [PATCH 69/74] Fix many type hint errors (#12289) * fix hidream type hint * fix hunyuan-video type hint * fix many type hint * fix many type hint errors * fix many type hint errors * fix many type hint errors * make stype & make quality --- src/diffusers/models/attention.py | 2 +- .../models/transformers/auraflow_transformer_2d.py | 10 +++++----- .../models/transformers/cogvideox_transformer_3d.py | 4 ++-- .../models/transformers/consisid_transformer_3d.py | 4 ++-- src/diffusers/models/transformers/lumina_nextdit2d.py | 6 +++--- src/diffusers/models/transformers/transformer_bria.py | 4 ++-- .../models/transformers/transformer_cogview3plus.py | 6 +++--- .../models/transformers/transformer_cogview4.py | 4 ++-- .../models/transformers/transformer_hidream_image.py | 10 +++++----- .../models/transformers/transformer_hunyuan_video.py | 6 +++--- .../transformer_hunyuan_video_framepack.py | 4 ++-- 11 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py index c720b37955..c99133f257 100644 --- a/src/diffusers/models/attention.py +++ b/src/diffusers/models/attention.py @@ -674,7 +674,7 @@ class JointTransformerBlock(nn.Module): encoder_hidden_states: torch.FloatTensor, temb: torch.FloatTensor, joint_attention_kwargs: Optional[Dict[str, Any]] = None, - ): + ) -> Tuple[torch.Tensor, torch.Tensor]: joint_attention_kwargs = joint_attention_kwargs or {} if self.use_dual_attention: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp, norm_hidden_states2, gate_msa2 = self.norm1( diff --git a/src/diffusers/models/transformers/auraflow_transformer_2d.py b/src/diffusers/models/transformers/auraflow_transformer_2d.py index a8d275d142..4d7d1ba40e 100644 --- a/src/diffusers/models/transformers/auraflow_transformer_2d.py +++ b/src/diffusers/models/transformers/auraflow_transformer_2d.py @@ -13,7 +13,7 @@ # limitations under the License. 
-from typing import Any, Dict, Optional, Union +from typing import Any, Dict, Optional, Tuple, Union import torch import torch.nn as nn @@ -92,7 +92,7 @@ class AuraFlowPatchEmbed(nn.Module): return selected_indices - def forward(self, latent): + def forward(self, latent) -> torch.Tensor: batch_size, num_channels, height, width = latent.size() latent = latent.view( batch_size, @@ -173,7 +173,7 @@ class AuraFlowSingleTransformerBlock(nn.Module): hidden_states: torch.FloatTensor, temb: torch.FloatTensor, attention_kwargs: Optional[Dict[str, Any]] = None, - ): + ) -> torch.Tensor: residual = hidden_states attention_kwargs = attention_kwargs or {} @@ -242,7 +242,7 @@ class AuraFlowJointTransformerBlock(nn.Module): encoder_hidden_states: torch.FloatTensor, temb: torch.FloatTensor, attention_kwargs: Optional[Dict[str, Any]] = None, - ): + ) -> Tuple[torch.Tensor, torch.Tensor]: residual = hidden_states residual_context = encoder_hidden_states attention_kwargs = attention_kwargs or {} @@ -472,7 +472,7 @@ class AuraFlowTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, From timestep: torch.LongTensor = None, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, - ) -> Union[torch.FloatTensor, Transformer2DModelOutput]: + ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) diff --git a/src/diffusers/models/transformers/cogvideox_transformer_3d.py b/src/diffusers/models/transformers/cogvideox_transformer_3d.py index a8c98bccb8..5038109690 100644 --- a/src/diffusers/models/transformers/cogvideox_transformer_3d.py +++ b/src/diffusers/models/transformers/cogvideox_transformer_3d.py @@ -122,7 +122,7 @@ class CogVideoXBlock(nn.Module): temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, attention_kwargs: Optional[Dict[str, Any]] = None, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: text_seq_length = encoder_hidden_states.size(1) attention_kwargs = attention_kwargs or {} @@ -441,7 +441,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, Cac image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, - ): + ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) diff --git a/src/diffusers/models/transformers/consisid_transformer_3d.py b/src/diffusers/models/transformers/consisid_transformer_3d.py index 41632dbd47..91fe811f00 100644 --- a/src/diffusers/models/transformers/consisid_transformer_3d.py +++ b/src/diffusers/models/transformers/consisid_transformer_3d.py @@ -315,7 +315,7 @@ class ConsisIDBlock(nn.Module): encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: text_seq_length = encoder_hidden_states.size(1) # norm & modulate @@ -691,7 +691,7 @@ class ConsisIDTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): id_cond: Optional[torch.Tensor] = None, id_vit_hidden: Optional[torch.Tensor] = None, return_dict: bool = True, - ): + ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 
1.0) diff --git a/src/diffusers/models/transformers/lumina_nextdit2d.py b/src/diffusers/models/transformers/lumina_nextdit2d.py index 84b1175386..bed5e69c2d 100644 --- a/src/diffusers/models/transformers/lumina_nextdit2d.py +++ b/src/diffusers/models/transformers/lumina_nextdit2d.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, Tuple, Union import torch import torch.nn as nn @@ -124,7 +124,7 @@ class LuminaNextDiTBlock(nn.Module): encoder_mask: torch.Tensor, temb: torch.Tensor, cross_attention_kwargs: Optional[Dict[str, Any]] = None, - ): + ) -> torch.Tensor: """ Perform a forward pass through the LuminaNextDiTBlock. @@ -297,7 +297,7 @@ class LuminaNextDiT2DModel(ModelMixin, ConfigMixin): image_rotary_emb: torch.Tensor, cross_attention_kwargs: Dict[str, Any] = None, return_dict=True, - ) -> torch.Tensor: + ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]: """ Forward pass of LuminaNextDiT. diff --git a/src/diffusers/models/transformers/transformer_bria.py b/src/diffusers/models/transformers/transformer_bria.py index 27a9941501..04a9c5645c 100644 --- a/src/diffusers/models/transformers/transformer_bria.py +++ b/src/diffusers/models/transformers/transformer_bria.py @@ -472,7 +472,7 @@ class BriaSingleTransformerBlock(nn.Module): temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, attention_kwargs: Optional[Dict[str, Any]] = None, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: text_seq_len = encoder_hidden_states.shape[1] hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) @@ -588,7 +588,7 @@ class BriaTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOrig return_dict: bool = True, controlnet_block_samples=None, controlnet_single_block_samples=None, - ) -> Union[torch.FloatTensor, Transformer2DModelOutput]: + ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]: """ The [`BriaTransformer2DModel`] forward method. diff --git a/src/diffusers/models/transformers/transformer_cogview3plus.py b/src/diffusers/models/transformers/transformer_cogview3plus.py index 77f15f6ca6..7356f4a606 100644 --- a/src/diffusers/models/transformers/transformer_cogview3plus.py +++ b/src/diffusers/models/transformers/transformer_cogview3plus.py @@ -13,7 +13,7 @@ # limitations under the License. -from typing import Dict, Union +from typing import Dict, Tuple, Union import torch import torch.nn as nn @@ -79,7 +79,7 @@ class CogView3PlusTransformerBlock(nn.Module): hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, emb: torch.Tensor, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: text_seq_length = encoder_hidden_states.size(1) # norm & modulate @@ -293,7 +293,7 @@ class CogView3PlusTransformer2DModel(ModelMixin, ConfigMixin): target_size: torch.Tensor, crop_coords: torch.Tensor, return_dict: bool = True, - ) -> Union[torch.Tensor, Transformer2DModelOutput]: + ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]: """ The [`CogView3PlusTransformer2DModel`] forward method. 
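The change repeated across these files is mechanical: transformer blocks that return a pair of tensors are now annotated as such, and model `forward` methods are annotated as returning either a tuple or a `Transformer2DModelOutput`. In isolation the pattern looks like this (toy class, not from the library):

```python
from typing import Tuple

import torch
import torch.nn as nn


class ExampleTransformerBlock(nn.Module):
    """Toy block: the return annotation now matches the two tensors actually returned."""

    def forward(
        self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # A real block would run attention and feed-forward layers here.
        return hidden_states, encoder_hidden_states


block = ExampleTransformerBlock()
h, e = block(torch.randn(1, 4, 8), torch.randn(1, 2, 8))
```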
diff --git a/src/diffusers/models/transformers/transformer_cogview4.py b/src/diffusers/models/transformers/transformer_cogview4.py index 25dcfa14cc..64e9a538a7 100644 --- a/src/diffusers/models/transformers/transformer_cogview4.py +++ b/src/diffusers/models/transformers/transformer_cogview4.py @@ -494,7 +494,7 @@ class CogView4TransformerBlock(nn.Module): ] = None, attention_mask: Optional[Dict[str, torch.Tensor]] = None, attention_kwargs: Optional[Dict[str, Any]] = None, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: # 1. Timestep conditioning ( norm_hidden_states, @@ -717,7 +717,7 @@ class CogView4Transformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, Cach image_rotary_emb: Optional[ Union[Tuple[torch.Tensor, torch.Tensor], List[Tuple[torch.Tensor, torch.Tensor]]] ] = None, - ) -> Union[torch.Tensor, Transformer2DModelOutput]: + ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) diff --git a/src/diffusers/models/transformers/transformer_hidream_image.py b/src/diffusers/models/transformers/transformer_hidream_image.py index 77902dcf58..4a5aee29ab 100644 --- a/src/diffusers/models/transformers/transformer_hidream_image.py +++ b/src/diffusers/models/transformers/transformer_hidream_image.py @@ -55,7 +55,7 @@ class HiDreamImageTimestepEmbed(nn.Module): self.time_proj = Timesteps(num_channels=frequency_embedding_size, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=frequency_embedding_size, time_embed_dim=hidden_size) - def forward(self, timesteps: torch.Tensor, wdtype: Optional[torch.dtype] = None): + def forward(self, timesteps: torch.Tensor, wdtype: Optional[torch.dtype] = None) -> torch.Tensor: t_emb = self.time_proj(timesteps).to(dtype=wdtype) t_emb = self.timestep_embedder(t_emb) return t_emb @@ -87,7 +87,7 @@ class HiDreamImagePatchEmbed(nn.Module): self.out_channels = out_channels self.proj = nn.Linear(in_channels * patch_size * patch_size, out_channels, bias=True) - def forward(self, latent): + def forward(self, latent) -> torch.Tensor: latent = self.proj(latent) return latent @@ -534,7 +534,7 @@ class HiDreamImageTransformerBlock(nn.Module): encoder_hidden_states: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, image_rotary_emb: torch.Tensor = None, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: wtype = hidden_states.dtype ( shift_msa_i, @@ -592,7 +592,7 @@ class HiDreamBlock(nn.Module): encoder_hidden_states: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, image_rotary_emb: torch.Tensor = None, - ) -> torch.Tensor: + ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: return self.block( hidden_states=hidden_states, hidden_states_masks=hidden_states_masks, @@ -786,7 +786,7 @@ class HiDreamImageTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, **kwargs, - ): + ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]: encoder_hidden_states = kwargs.get("encoder_hidden_states", None) if encoder_hidden_states is not None: diff --git a/src/diffusers/models/transformers/transformer_hunyuan_video.py b/src/diffusers/models/transformers/transformer_hunyuan_video.py index 6944a6c536..bc857ccab4 100644 --- a/src/diffusers/models/transformers/transformer_hunyuan_video.py +++ b/src/diffusers/models/transformers/transformer_hunyuan_video.py 
@@ -529,7 +529,7 @@ class HunyuanVideoSingleTransformerBlock(nn.Module): image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: text_seq_length = encoder_hidden_states.shape[1] hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1) @@ -684,7 +684,7 @@ class HunyuanVideoTokenReplaceSingleTransformerBlock(nn.Module): image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, token_replace_emb: torch.Tensor = None, num_tokens: int = None, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: text_seq_length = encoder_hidden_states.shape[1] hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1) @@ -1038,7 +1038,7 @@ class HunyuanVideoTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, guidance: torch.Tensor = None, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, - ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]: + ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) diff --git a/src/diffusers/models/transformers/transformer_hunyuan_video_framepack.py b/src/diffusers/models/transformers/transformer_hunyuan_video_framepack.py index c2eb7fd2a7..60b40fff3c 100644 --- a/src/diffusers/models/transformers/transformer_hunyuan_video_framepack.py +++ b/src/diffusers/models/transformers/transformer_hunyuan_video_framepack.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn @@ -216,7 +216,7 @@ class HunyuanVideoFramepackTransformer3DModel( indices_latents_history_4x: Optional[torch.Tensor] = None, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, - ): + ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) From eda9ff8300eb3b8ceec15ef69d74e35abd3d39b3 Mon Sep 17 00:00:00 2001 From: Fredy Date: Thu, 18 Sep 2025 00:03:43 -0600 Subject: [PATCH 70/74] Add RequestScopedPipeline for safe concurrent inference, tokenizer lock and non-mutating retrieve_timesteps (#12328) * Basic implementation of request scheduling * Basic editing in SD and Flux Pipelines * Small Fix * Fix * Update for more pipelines * Add examples/server-async * Add examples/server-async * Updated RequestScopedPipeline to handle a single tokenizer lock to avoid race conditions * Fix * Fix _TokenizerLockWrapper * Fix _TokenizerLockWrapper * Delete _TokenizerLockWrapper * Fix tokenizer * Update examples/server-async * Fix server-async * Optimizations in examples/server-async * We keep the implementation simple in examples/server-async * Update examples/server-async/README.md * Update examples/server-async/README.md for changes to tokenizer locks and backward-compatible retrieve_timesteps * The changes to the diffusers core have been undone and all logic is being moved to exmaples/server-async * Update examples/server-async/utils/* * Fix BaseAsyncScheduler * Rollback in the core of the diffusers * Update examples/server-async/README.md * Complete rollback of diffusers core files * Simple implementation of an asynchronous server compatible with SD3-3.5 and Flux 
Pipelines * Update examples/server-async/README.md * Fixed import errors in 'examples/server-async/serverasync.py' * Flux Pipeline Discard * Update examples/server-async/README.md * Apply style fixes --------- Co-authored-by: Sayak Paul Co-authored-by: github-actions[bot] --- examples/server-async/Pipelines.py | 91 ++++++ examples/server-async/README.md | 171 ++++++++++ examples/server-async/requirements.txt | 10 + examples/server-async/serverasync.py | 230 ++++++++++++++ examples/server-async/test.py | 65 ++++ examples/server-async/utils/__init__.py | 2 + .../utils/requestscopedpipeline.py | 296 ++++++++++++++++++ examples/server-async/utils/scheduler.py | 141 +++++++++ examples/server-async/utils/utils.py | 48 +++ 9 files changed, 1054 insertions(+) create mode 100644 examples/server-async/Pipelines.py create mode 100644 examples/server-async/README.md create mode 100644 examples/server-async/requirements.txt create mode 100644 examples/server-async/serverasync.py create mode 100644 examples/server-async/test.py create mode 100644 examples/server-async/utils/__init__.py create mode 100644 examples/server-async/utils/requestscopedpipeline.py create mode 100644 examples/server-async/utils/scheduler.py create mode 100644 examples/server-async/utils/utils.py diff --git a/examples/server-async/Pipelines.py b/examples/server-async/Pipelines.py new file mode 100644 index 0000000000..f89cac6a7e --- /dev/null +++ b/examples/server-async/Pipelines.py @@ -0,0 +1,91 @@ +import logging +import os +from dataclasses import dataclass, field +from typing import List + +import torch +from pydantic import BaseModel + +from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3 import StableDiffusion3Pipeline + + +logger = logging.getLogger(__name__) + + +class TextToImageInput(BaseModel): + model: str + prompt: str + size: str | None = None + n: int | None = None + + +@dataclass +class PresetModels: + SD3: List[str] = field(default_factory=lambda: ["stabilityai/stable-diffusion-3-medium"]) + SD3_5: List[str] = field( + default_factory=lambda: [ + "stabilityai/stable-diffusion-3.5-large", + "stabilityai/stable-diffusion-3.5-large-turbo", + "stabilityai/stable-diffusion-3.5-medium", + ] + ) + + +class TextToImagePipelineSD3: + def __init__(self, model_path: str | None = None): + self.model_path = model_path or os.getenv("MODEL_PATH") + self.pipeline: StableDiffusion3Pipeline | None = None + self.device: str | None = None + + def start(self): + if torch.cuda.is_available(): + model_path = self.model_path or "stabilityai/stable-diffusion-3.5-large" + logger.info("Loading CUDA") + self.device = "cuda" + self.pipeline = StableDiffusion3Pipeline.from_pretrained( + model_path, + torch_dtype=torch.float16, + ).to(device=self.device) + elif torch.backends.mps.is_available(): + model_path = self.model_path or "stabilityai/stable-diffusion-3.5-medium" + logger.info("Loading MPS for Mac M Series") + self.device = "mps" + self.pipeline = StableDiffusion3Pipeline.from_pretrained( + model_path, + torch_dtype=torch.bfloat16, + ).to(device=self.device) + else: + raise Exception("No CUDA or MPS device available") + + +class ModelPipelineInitializer: + def __init__(self, model: str = "", type_models: str = "t2im"): + self.model = model + self.type_models = type_models + self.pipeline = None + self.device = "cuda" if torch.cuda.is_available() else "mps" + self.model_type = None + + def initialize_pipeline(self): + if not self.model: + raise ValueError("Model name not provided") + + # Check if model exists in 
PresetModels + preset_models = PresetModels() + + # Determine which model type we're dealing with + if self.model in preset_models.SD3: + self.model_type = "SD3" + elif self.model in preset_models.SD3_5: + self.model_type = "SD3_5" + + # Create appropriate pipeline based on model type and type_models + if self.type_models == "t2im": + if self.model_type in ["SD3", "SD3_5"]: + self.pipeline = TextToImagePipelineSD3(self.model) + else: + raise ValueError(f"Model type {self.model_type} not supported for text-to-image") + elif self.type_models == "t2v": + raise ValueError(f"Unsupported type_models: {self.type_models}") + + return self.pipeline diff --git a/examples/server-async/README.md b/examples/server-async/README.md new file mode 100644 index 0000000000..a47ab7c7f2 --- /dev/null +++ b/examples/server-async/README.md @@ -0,0 +1,171 @@ +# Asynchronous server and parallel execution of models + +> Example/demo server that keeps a single model in memory while safely running parallel inference requests by creating per-request lightweight views and cloning only small, stateful components (schedulers, RNG state, small mutable attrs). Works with StableDiffusion3 pipelines. +> We recommend running 10 to 50 inferences in parallel for optimal throughput; individual requests typically take anywhere from 25-30 seconds up to about 1 minute 30 seconds. (This is only recommended if you have a GPU with 35GB of VRAM or more; otherwise, keep it to one or two inferences in parallel to avoid decoding or saving errors due to memory shortages.) + +## ⚠️ IMPORTANT + +* The example demonstrates how to run pipelines like `StableDiffusion3-3.5` concurrently while keeping a single copy of the heavy model parameters on GPU. + +## Necessary components + +All the components needed to create the inference server are in the current directory: + +``` +server-async/ +├── utils/ +├─────── __init__.py +├─────── scheduler.py # BaseAsyncScheduler wrapper and async_retrieve_timesteps for safe concurrent inference +├─────── requestscopedpipeline.py # RequestScoped Pipeline for inference with a single in-memory model +├─────── utils.py # Image/video saving utilities and service configuration +├── Pipelines.py # pipeline loader classes (SD3) +├── serverasync.py # FastAPI app with lifespan management and async inference endpoints +├── test.py # Client test script for inference requests +├── requirements.txt # Dependencies +└── README.md # This documentation +``` + +## What `diffusers-async` adds / Why we needed it + +Core problem: a naive server that calls `pipe.__call__` concurrently can hit **race conditions** (e.g., `scheduler.set_timesteps` mutates shared state) or explode memory by deep-copying the whole pipeline per-request. + +`diffusers-async` / this example addresses that by: + +* **Request-scoped views**: `RequestScopedPipeline` creates a shallow copy of the pipeline per request so heavy weights (UNet, VAE, text encoder) remain shared and *are not duplicated*. +* **Per-request mutable state**: stateful small objects (scheduler, RNG state, small lists/dicts, callbacks) are cloned per request. The system uses `BaseAsyncScheduler.clone_for_request(...)` for scheduler cloning, with fallback to safe `deepcopy` or other heuristics. +* **Tokenizer concurrency safety**: `RequestScopedPipeline` now manages an internal tokenizer lock with automatic tokenizer detection and wrapping. This ensures that Rust tokenizers are safe to use under concurrency — race condition errors like `Already borrowed` no longer occur.
+* **`async_retrieve_timesteps(..., return_scheduler=True)`**: fully retro-compatible helper that returns `(timesteps, num_inference_steps, scheduler)` without mutating the shared scheduler. For users not using `return_scheduler=True`, the behavior is identical to the original API. +* **Robust attribute handling**: wrapper avoids writing to read-only properties (e.g., `components`) and auto-detects small mutable attributes to clone while avoiding duplication of large tensors. Configurable tensor size threshold prevents cloning of large tensors. +* **Enhanced scheduler wrapping**: `BaseAsyncScheduler` automatically wraps schedulers with improved `__getattr__`, `__setattr__`, and debugging methods (`__repr__`, `__str__`). + +## How the server works (high-level flow) + +1. **Single model instance** is loaded into memory (GPU/MPS) when the server starts. +2. On each HTTP inference request: + + * The server uses `RequestScopedPipeline.generate(...)` which: + + * automatically wraps the base scheduler in `BaseAsyncScheduler` (if not already wrapped), + * obtains a *local scheduler* (via `clone_for_request()` or `deepcopy`), + * does `local_pipe = copy.copy(base_pipe)` (shallow copy), + * sets `local_pipe.scheduler = local_scheduler` (if possible), + * clones only small mutable attributes (callbacks, rng, small latents) with auto-detection, + * wraps tokenizers with thread-safe locks to prevent race conditions, + * optionally enters a `model_cpu_offload_context()` for memory offload hooks, + * calls the pipeline on the local view (`local_pipe(...)`). +3. **Result**: inference completes, images are moved to CPU & saved (if requested), internal buffers freed (GC + `torch.cuda.empty_cache()`). +4. Multiple requests can run in parallel while sharing heavy weights and isolating mutable state. + +## How to set up and run the server + +### 1) Install dependencies + +Recommended: create a virtualenv / conda environment. 
+ +```bash +pip install diffusers +pip install -r requirements.txt +``` + +### 2) Start the server + +Using the `serverasync.py` file that already has everything you need: + +```bash +python serverasync.py +``` + +The server will start on `http://localhost:8500` by default with the following features: +- FastAPI application with async lifespan management +- Automatic model loading and pipeline initialization +- Request counting and active inference tracking +- Memory cleanup after each inference +- CORS middleware for cross-origin requests + +### 3) Test the server + +Use the included test script: + +```bash +python test.py +``` + +Or send a manual request: + +`POST /api/diffusers/inference` with JSON body: + +```json +{ + "prompt": "A futuristic cityscape, vibrant colors", + "num_inference_steps": 30, + "num_images_per_prompt": 1 +} +``` + +Response example: + +```json +{ + "response": ["http://localhost:8500/images/img123.png"] +} +``` + +### 4) Server endpoints + +- `GET /` - Welcome message +- `POST /api/diffusers/inference` - Main inference endpoint +- `GET /images/{filename}` - Serve generated images +- `GET /api/status` - Server status and memory info + +## Advanced Configuration + +### RequestScopedPipeline Parameters + +```python +RequestScopedPipeline( + pipeline, # Base pipeline to wrap + mutable_attrs=None, # Custom list of attributes to clone + auto_detect_mutables=True, # Enable automatic detection of mutable attributes + tensor_numel_threshold=1_000_000, # Tensor size threshold for cloning + tokenizer_lock=None, # Custom threading lock for tokenizers + wrap_scheduler=True # Auto-wrap scheduler in BaseAsyncScheduler +) +``` + +### BaseAsyncScheduler Features + +* Transparent proxy to the original scheduler with `__getattr__` and `__setattr__` +* `clone_for_request()` method for safe per-request scheduler cloning +* Enhanced debugging with `__repr__` and `__str__` methods +* Full compatibility with existing scheduler APIs + +### Server Configuration + +The server configuration can be modified in `serverasync.py` through the `ServerConfigModels` dataclass: + +```python +@dataclass +class ServerConfigModels: + model: str = 'stabilityai/stable-diffusion-3.5-medium' + type_models: str = 't2im' + host: str = '0.0.0.0' + port: int = 8500 +``` + +## Troubleshooting (quick) + +* `Already borrowed` — previously a Rust tokenizer concurrency error. + ✅ This is now fixed: `RequestScopedPipeline` automatically detects and wraps tokenizers with thread locks, so race conditions no longer happen. + +* `can't set attribute 'components'` — pipeline exposes read-only `components`. + ✅ The RequestScopedPipeline now detects read-only properties and skips setting them automatically. + +* Scheduler issues: + * If the scheduler doesn't implement `clone_for_request` and `deepcopy` fails, we log and fallback — but prefer `async_retrieve_timesteps(..., return_scheduler=True)` to avoid mutating the shared scheduler. + ✅ Note: `async_retrieve_timesteps` is fully retro-compatible — if you don't pass `return_scheduler=True`, the behavior is unchanged. + +* Memory issues with large tensors: + ✅ The system now has configurable `tensor_numel_threshold` to prevent cloning of large tensors while still cloning small mutable ones. + +* Automatic tokenizer detection: + ✅ The system automatically identifies tokenizer components by checking for tokenizer methods, class names, and attributes, then applies thread-safe wrappers. 
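+## Minimal programmatic usage (sketch)
+
+If you want to use `RequestScopedPipeline` outside of the FastAPI server, the sketch below shows the core flow under the same assumptions as `Pipelines.py` (a CUDA device and the SD3.5 medium checkpoint). The prompts and output filenames are illustrative only and are not part of the example server.
+
+```python
+import threading
+
+import torch
+
+from diffusers import StableDiffusion3Pipeline
+from utils import RequestScopedPipeline  # the utils/ package from this example directory
+
+# Heavy weights are loaded once and shared by every request-scoped view.
+base = StableDiffusion3Pipeline.from_pretrained(
+    "stabilityai/stable-diffusion-3.5-medium", torch_dtype=torch.float16
+).to("cuda")
+request_pipe = RequestScopedPipeline(base)
+
+
+def run(prompt: str, filename: str):
+    # Each call builds a shallow per-request pipeline view with its own scheduler clone,
+    # so concurrent calls do not mutate shared state.
+    result = request_pipe.generate(prompt=prompt, num_inference_steps=28, device="cuda")
+    result.images[0].save(filename)
+
+
+threads = [
+    threading.Thread(target=run, args=("a red fox in the snow", "fox.png")),
+    threading.Thread(target=run, args=("a lighthouse at dusk", "lighthouse.png")),
+]
+for t in threads:
+    t.start()
+for t in threads:
+    t.join()
+```
+
+Keep the number of parallel calls within your VRAM budget, as discussed at the top of this README.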
\ No newline at end of file diff --git a/examples/server-async/requirements.txt b/examples/server-async/requirements.txt new file mode 100644 index 0000000000..aafa93b702 --- /dev/null +++ b/examples/server-async/requirements.txt @@ -0,0 +1,10 @@ +torch +torchvision +transformers +sentencepiece +fastapi +uvicorn +ftfy +accelerate +xformers +protobuf \ No newline at end of file diff --git a/examples/server-async/serverasync.py b/examples/server-async/serverasync.py new file mode 100644 index 0000000000..b279b36f9a --- /dev/null +++ b/examples/server-async/serverasync.py @@ -0,0 +1,230 @@ +import asyncio +import gc +import logging +import os +import random +import threading +from contextlib import asynccontextmanager +from dataclasses import dataclass +from typing import Any, Dict, Optional, Type + +import torch +from fastapi import FastAPI, HTTPException, Request +from fastapi.concurrency import run_in_threadpool +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import FileResponse +from Pipelines import ModelPipelineInitializer +from pydantic import BaseModel + +from utils import RequestScopedPipeline, Utils + + +@dataclass +class ServerConfigModels: + model: str = "stabilityai/stable-diffusion-3.5-medium" + type_models: str = "t2im" + constructor_pipeline: Optional[Type] = None + custom_pipeline: Optional[Type] = None + components: Optional[Dict[str, Any]] = None + torch_dtype: Optional[torch.dtype] = None + host: str = "0.0.0.0" + port: int = 8500 + + +server_config = ServerConfigModels() + + +@asynccontextmanager +async def lifespan(app: FastAPI): + logging.basicConfig(level=logging.INFO) + app.state.logger = logging.getLogger("diffusers-server") + os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128,expandable_segments:True" + os.environ["CUDA_LAUNCH_BLOCKING"] = "0" + + app.state.total_requests = 0 + app.state.active_inferences = 0 + app.state.metrics_lock = asyncio.Lock() + app.state.metrics_task = None + + app.state.utils_app = Utils( + host=server_config.host, + port=server_config.port, + ) + + async def metrics_loop(): + try: + while True: + async with app.state.metrics_lock: + total = app.state.total_requests + active = app.state.active_inferences + app.state.logger.info(f"[METRICS] total_requests={total} active_inferences={active}") + await asyncio.sleep(5) + except asyncio.CancelledError: + app.state.logger.info("Metrics loop cancelled") + raise + + app.state.metrics_task = asyncio.create_task(metrics_loop()) + + try: + yield + finally: + task = app.state.metrics_task + if task: + task.cancel() + try: + await task + except asyncio.CancelledError: + pass + + try: + stop_fn = getattr(model_pipeline, "stop", None) or getattr(model_pipeline, "close", None) + if callable(stop_fn): + await run_in_threadpool(stop_fn) + except Exception as e: + app.state.logger.warning(f"Error during pipeline shutdown: {e}") + + app.state.logger.info("Lifespan shutdown complete") + + +app = FastAPI(lifespan=lifespan) + +logger = logging.getLogger("DiffusersServer.Pipelines") + + +initializer = ModelPipelineInitializer( + model=server_config.model, + type_models=server_config.type_models, +) +model_pipeline = initializer.initialize_pipeline() +model_pipeline.start() + +request_pipe = RequestScopedPipeline(model_pipeline.pipeline) +pipeline_lock = threading.Lock() + +logger.info(f"Pipeline initialized and ready to receive requests (model ={server_config.model})") + +app.state.MODEL_INITIALIZER = initializer +app.state.MODEL_PIPELINE = model_pipeline 
+app.state.REQUEST_PIPE = request_pipe +app.state.PIPELINE_LOCK = pipeline_lock + + +class JSONBodyQueryAPI(BaseModel): + model: str | None = None + prompt: str + negative_prompt: str | None = None + num_inference_steps: int = 28 + num_images_per_prompt: int = 1 + + +@app.middleware("http") +async def count_requests_middleware(request: Request, call_next): + async with app.state.metrics_lock: + app.state.total_requests += 1 + response = await call_next(request) + return response + + +@app.get("/") +async def root(): + return {"message": "Welcome to the Diffusers Server"} + + +@app.post("/api/diffusers/inference") +async def api(json: JSONBodyQueryAPI): + prompt = json.prompt + negative_prompt = json.negative_prompt or "" + num_steps = json.num_inference_steps + num_images_per_prompt = json.num_images_per_prompt + + wrapper = app.state.MODEL_PIPELINE + initializer = app.state.MODEL_INITIALIZER + + utils_app = app.state.utils_app + + if not wrapper or not wrapper.pipeline: + raise HTTPException(500, "Model not initialized correctly") + if not prompt.strip(): + raise HTTPException(400, "No prompt provided") + + def make_generator(): + g = torch.Generator(device=initializer.device) + return g.manual_seed(random.randint(0, 10_000_000)) + + req_pipe = app.state.REQUEST_PIPE + + def infer(): + gen = make_generator() + return req_pipe.generate( + prompt=prompt, + negative_prompt=negative_prompt, + generator=gen, + num_inference_steps=num_steps, + num_images_per_prompt=num_images_per_prompt, + device=initializer.device, + output_type="pil", + ) + + try: + async with app.state.metrics_lock: + app.state.active_inferences += 1 + + output = await run_in_threadpool(infer) + + async with app.state.metrics_lock: + app.state.active_inferences = max(0, app.state.active_inferences - 1) + + urls = [utils_app.save_image(img) for img in output.images] + return {"response": urls} + + except Exception as e: + async with app.state.metrics_lock: + app.state.active_inferences = max(0, app.state.active_inferences - 1) + logger.error(f"Error during inference: {e}") + raise HTTPException(500, f"Error in processing: {e}") + + finally: + if torch.cuda.is_available(): + torch.cuda.synchronize() + torch.cuda.empty_cache() + torch.cuda.reset_peak_memory_stats() + torch.cuda.ipc_collect() + gc.collect() + + +@app.get("/images/{filename}") +async def serve_image(filename: str): + utils_app = app.state.utils_app + file_path = os.path.join(utils_app.image_dir, filename) + if not os.path.isfile(file_path): + raise HTTPException(status_code=404, detail="Image not found") + return FileResponse(file_path, media_type="image/png") + + +@app.get("/api/status") +async def get_status(): + memory_info = {} + if torch.cuda.is_available(): + memory_allocated = torch.cuda.memory_allocated() / 1024**3 # GB + memory_reserved = torch.cuda.memory_reserved() / 1024**3 # GB + memory_info = { + "memory_allocated_gb": round(memory_allocated, 2), + "memory_reserved_gb": round(memory_reserved, 2), + "device": torch.cuda.get_device_name(0), + } + + return {"current_model": server_config.model, "type_models": server_config.type_models, "memory": memory_info} + + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +if __name__ == "__main__": + import uvicorn + + uvicorn.run(app, host=server_config.host, port=server_config.port) diff --git a/examples/server-async/test.py b/examples/server-async/test.py new file mode 100644 index 0000000000..e67317ea8f --- /dev/null +++ 
b/examples/server-async/test.py @@ -0,0 +1,65 @@ +import os +import time +import urllib.parse + +import requests + + +SERVER_URL = "http://localhost:8500/api/diffusers/inference" +BASE_URL = "http://localhost:8500" +DOWNLOAD_FOLDER = "generated_images" +WAIT_BEFORE_DOWNLOAD = 2 # seconds + +os.makedirs(DOWNLOAD_FOLDER, exist_ok=True) + + +def save_from_url(url: str) -> str: + """Download the given URL (relative or absolute) and save it locally.""" + if url.startswith("/"): + direct = BASE_URL.rstrip("/") + url + else: + direct = url + resp = requests.get(direct, timeout=60) + resp.raise_for_status() + filename = os.path.basename(urllib.parse.urlparse(direct).path) or f"img_{int(time.time())}.png" + path = os.path.join(DOWNLOAD_FOLDER, filename) + with open(path, "wb") as f: + f.write(resp.content) + return path + + +def main(): + payload = { + "prompt": "The T-800 Terminator Robot Returning From The Future, Anime Style", + "num_inference_steps": 30, + "num_images_per_prompt": 1, + } + + print("Sending request...") + try: + r = requests.post(SERVER_URL, json=payload, timeout=480) + r.raise_for_status() + except Exception as e: + print(f"Request failed: {e}") + return + + body = r.json().get("response", []) + # Normalize to a list + urls = body if isinstance(body, list) else [body] if body else [] + if not urls: + print("No URLs found in the response. Check the server output.") + return + + print(f"Received {len(urls)} URL(s). Waiting {WAIT_BEFORE_DOWNLOAD}s before downloading...") + time.sleep(WAIT_BEFORE_DOWNLOAD) + + for u in urls: + try: + path = save_from_url(u) + print(f"Image saved to: {path}") + except Exception as e: + print(f"Error downloading {u}: {e}") + + +if __name__ == "__main__": + main() diff --git a/examples/server-async/utils/__init__.py b/examples/server-async/utils/__init__.py new file mode 100644 index 0000000000..731cfe491a --- /dev/null +++ b/examples/server-async/utils/__init__.py @@ -0,0 +1,2 @@ +from .requestscopedpipeline import RequestScopedPipeline +from .utils import Utils diff --git a/examples/server-async/utils/requestscopedpipeline.py b/examples/server-async/utils/requestscopedpipeline.py new file mode 100644 index 0000000000..57d1e25671 --- /dev/null +++ b/examples/server-async/utils/requestscopedpipeline.py @@ -0,0 +1,296 @@ +import copy +import threading +from typing import Any, Iterable, List, Optional + +import torch + +from diffusers.utils import logging + +from .scheduler import BaseAsyncScheduler, async_retrieve_timesteps + + +logger = logging.get_logger(__name__) + + +def safe_tokenize(tokenizer, *args, lock, **kwargs): + with lock: + return tokenizer(*args, **kwargs) + + +class RequestScopedPipeline: + DEFAULT_MUTABLE_ATTRS = [ + "_all_hooks", + "_offload_device", + "_progress_bar_config", + "_progress_bar", + "_rng_state", + "_last_seed", + "latents", + ] + + def __init__( + self, + pipeline: Any, + mutable_attrs: Optional[Iterable[str]] = None, + auto_detect_mutables: bool = True, + tensor_numel_threshold: int = 1_000_000, + tokenizer_lock: Optional[threading.Lock] = None, + wrap_scheduler: bool = True, + ): + self._base = pipeline + self.unet = getattr(pipeline, "unet", None) + self.vae = getattr(pipeline, "vae", None) + self.text_encoder = getattr(pipeline, "text_encoder", None) + self.components = getattr(pipeline, "components", None) + + if wrap_scheduler and hasattr(pipeline, "scheduler") and pipeline.scheduler is not None: + if not isinstance(pipeline.scheduler, BaseAsyncScheduler): + pipeline.scheduler = 
BaseAsyncScheduler(pipeline.scheduler) + + self._mutable_attrs = list(mutable_attrs) if mutable_attrs is not None else list(self.DEFAULT_MUTABLE_ATTRS) + self._tokenizer_lock = tokenizer_lock if tokenizer_lock is not None else threading.Lock() + + self._auto_detect_mutables = bool(auto_detect_mutables) + self._tensor_numel_threshold = int(tensor_numel_threshold) + + self._auto_detected_attrs: List[str] = [] + + def _make_local_scheduler(self, num_inference_steps: int, device: Optional[str] = None, **clone_kwargs): + base_sched = getattr(self._base, "scheduler", None) + if base_sched is None: + return None + + if not isinstance(base_sched, BaseAsyncScheduler): + wrapped_scheduler = BaseAsyncScheduler(base_sched) + else: + wrapped_scheduler = base_sched + + try: + return wrapped_scheduler.clone_for_request( + num_inference_steps=num_inference_steps, device=device, **clone_kwargs + ) + except Exception as e: + logger.debug(f"clone_for_request failed: {e}; falling back to deepcopy()") + try: + return copy.deepcopy(wrapped_scheduler) + except Exception as e: + logger.warning(f"Deepcopy of scheduler failed: {e}. Returning original scheduler (*risky*).") + return wrapped_scheduler + + def _autodetect_mutables(self, max_attrs: int = 40): + if not self._auto_detect_mutables: + return [] + + if self._auto_detected_attrs: + return self._auto_detected_attrs + + candidates: List[str] = [] + seen = set() + for name in dir(self._base): + if name.startswith("__"): + continue + if name in self._mutable_attrs: + continue + if name in ("to", "save_pretrained", "from_pretrained"): + continue + try: + val = getattr(self._base, name) + except Exception: + continue + + import types + + # skip callables and modules + if callable(val) or isinstance(val, (types.ModuleType, types.FunctionType, types.MethodType)): + continue + + # containers -> candidate + if isinstance(val, (dict, list, set, tuple, bytearray)): + candidates.append(name) + seen.add(name) + else: + # try Tensor detection + try: + if isinstance(val, torch.Tensor): + if val.numel() <= self._tensor_numel_threshold: + candidates.append(name) + seen.add(name) + else: + logger.debug(f"Ignoring large tensor attr '{name}', numel={val.numel()}") + except Exception: + continue + + if len(candidates) >= max_attrs: + break + + self._auto_detected_attrs = candidates + logger.debug(f"Autodetected mutable attrs to clone: {self._auto_detected_attrs}") + return self._auto_detected_attrs + + def _is_readonly_property(self, base_obj, attr_name: str) -> bool: + try: + cls = type(base_obj) + descriptor = getattr(cls, attr_name, None) + if isinstance(descriptor, property): + return descriptor.fset is None + if hasattr(descriptor, "__set__") is False and descriptor is not None: + return False + except Exception: + pass + return False + + def _clone_mutable_attrs(self, base, local): + attrs_to_clone = list(self._mutable_attrs) + attrs_to_clone.extend(self._autodetect_mutables()) + + EXCLUDE_ATTRS = { + "components", + } + + for attr in attrs_to_clone: + if attr in EXCLUDE_ATTRS: + logger.debug(f"Skipping excluded attr '{attr}'") + continue + if not hasattr(base, attr): + continue + if self._is_readonly_property(base, attr): + logger.debug(f"Skipping read-only property '{attr}'") + continue + + try: + val = getattr(base, attr) + except Exception as e: + logger.debug(f"Could not getattr('{attr}') on base pipeline: {e}") + continue + + try: + if isinstance(val, dict): + setattr(local, attr, dict(val)) + elif isinstance(val, (list, tuple, set)): + setattr(local, attr, list(val)) 
+ elif isinstance(val, bytearray): + setattr(local, attr, bytearray(val)) + else: + # small tensors or atomic values + if isinstance(val, torch.Tensor): + if val.numel() <= self._tensor_numel_threshold: + setattr(local, attr, val.clone()) + else: + # don't clone big tensors, keep reference + setattr(local, attr, val) + else: + try: + setattr(local, attr, copy.copy(val)) + except Exception: + setattr(local, attr, val) + except (AttributeError, TypeError) as e: + logger.debug(f"Skipping cloning attribute '{attr}' because it is not settable: {e}") + continue + except Exception as e: + logger.debug(f"Unexpected error cloning attribute '{attr}': {e}") + continue + + def _is_tokenizer_component(self, component) -> bool: + if component is None: + return False + + tokenizer_methods = ["encode", "decode", "tokenize", "__call__"] + has_tokenizer_methods = any(hasattr(component, method) for method in tokenizer_methods) + + class_name = component.__class__.__name__.lower() + has_tokenizer_in_name = "tokenizer" in class_name + + tokenizer_attrs = ["vocab_size", "pad_token", "eos_token", "bos_token"] + has_tokenizer_attrs = any(hasattr(component, attr) for attr in tokenizer_attrs) + + return has_tokenizer_methods and (has_tokenizer_in_name or has_tokenizer_attrs) + + def generate(self, *args, num_inference_steps: int = 50, device: Optional[str] = None, **kwargs): + local_scheduler = self._make_local_scheduler(num_inference_steps=num_inference_steps, device=device) + + try: + local_pipe = copy.copy(self._base) + except Exception as e: + logger.warning(f"copy.copy(self._base) failed: {e}. Falling back to deepcopy (may increase memory).") + local_pipe = copy.deepcopy(self._base) + + if local_scheduler is not None: + try: + timesteps, num_steps, configured_scheduler = async_retrieve_timesteps( + local_scheduler.scheduler, + num_inference_steps=num_inference_steps, + device=device, + return_scheduler=True, + **{k: v for k, v in kwargs.items() if k in ["timesteps", "sigmas"]}, + ) + + final_scheduler = BaseAsyncScheduler(configured_scheduler) + setattr(local_pipe, "scheduler", final_scheduler) + except Exception: + logger.warning("Could not set scheduler on local pipe; proceeding without replacing scheduler.") + + self._clone_mutable_attrs(self._base, local_pipe) + + # 4) wrap tokenizers on the local pipe with the lock wrapper + tokenizer_wrappers = {} # name -> original_tokenizer + try: + # a) wrap direct tokenizer attributes (tokenizer, tokenizer_2, ...) 
+ for name in dir(local_pipe): + if "tokenizer" in name and not name.startswith("_"): + tok = getattr(local_pipe, name, None) + if tok is not None and self._is_tokenizer_component(tok): + tokenizer_wrappers[name] = tok + setattr( + local_pipe, + name, + lambda *args, tok=tok, **kwargs: safe_tokenize( + tok, *args, lock=self._tokenizer_lock, **kwargs + ), + ) + + # b) wrap tokenizers in components dict + if hasattr(local_pipe, "components") and isinstance(local_pipe.components, dict): + for key, val in local_pipe.components.items(): + if val is None: + continue + + if self._is_tokenizer_component(val): + tokenizer_wrappers[f"components[{key}]"] = val + local_pipe.components[key] = lambda *args, tokenizer=val, **kwargs: safe_tokenize( + tokenizer, *args, lock=self._tokenizer_lock, **kwargs + ) + + except Exception as e: + logger.debug(f"Tokenizer wrapping step encountered an error: {e}") + + result = None + cm = getattr(local_pipe, "model_cpu_offload_context", None) + try: + if callable(cm): + try: + with cm(): + result = local_pipe(*args, num_inference_steps=num_inference_steps, **kwargs) + except TypeError: + # cm might be a context manager instance rather than callable + try: + with cm: + result = local_pipe(*args, num_inference_steps=num_inference_steps, **kwargs) + except Exception as e: + logger.debug(f"model_cpu_offload_context usage failed: {e}. Proceeding without it.") + result = local_pipe(*args, num_inference_steps=num_inference_steps, **kwargs) + else: + # no offload context available — call directly + result = local_pipe(*args, num_inference_steps=num_inference_steps, **kwargs) + + return result + + finally: + try: + for name, tok in tokenizer_wrappers.items(): + if name.startswith("components["): + key = name[len("components[") : -1] + local_pipe.components[key] = tok + else: + setattr(local_pipe, name, tok) + except Exception as e: + logger.debug(f"Error restoring wrapped tokenizers: {e}") diff --git a/examples/server-async/utils/scheduler.py b/examples/server-async/utils/scheduler.py new file mode 100644 index 0000000000..86d47cac61 --- /dev/null +++ b/examples/server-async/utils/scheduler.py @@ -0,0 +1,141 @@ +import copy +import inspect +from typing import Any, List, Optional, Union + +import torch + + +class BaseAsyncScheduler: + def __init__(self, scheduler: Any): + self.scheduler = scheduler + + def __getattr__(self, name: str): + if hasattr(self.scheduler, name): + return getattr(self.scheduler, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, name: str, value): + if name == "scheduler": + super().__setattr__(name, value) + else: + if hasattr(self, "scheduler") and hasattr(self.scheduler, name): + setattr(self.scheduler, name, value) + else: + super().__setattr__(name, value) + + def clone_for_request(self, num_inference_steps: int, device: Union[str, torch.device, None] = None, **kwargs): + local = copy.deepcopy(self.scheduler) + local.set_timesteps(num_inference_steps=num_inference_steps, device=device, **kwargs) + cloned = self.__class__(local) + return cloned + + def __repr__(self): + return f"BaseAsyncScheduler({repr(self.scheduler)})" + + def __str__(self): + return f"BaseAsyncScheduler wrapping: {str(self.scheduler)}" + + +def async_retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + r""" + Calls the scheduler's 
`set_timesteps` method and retrieves timesteps from the scheduler after the call. + Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Backwards compatible: by default the function behaves exactly as before and returns + (timesteps_tensor, num_inference_steps) + + If the caller passes `return_scheduler=True` in kwargs, the function will **not** mutate the passed + scheduler. Instead it will use a cloned scheduler if available (via `scheduler.clone_for_request`) + or a deepcopy fallback, call `set_timesteps` on that cloned scheduler, and return: + (timesteps_tensor, num_inference_steps, scheduler_in_use) + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Optional kwargs: + return_scheduler (bool, default False): if True, return (timesteps, num_inference_steps, scheduler_in_use) + where `scheduler_in_use` is a scheduler instance that already has timesteps set. + This mode will prefer `scheduler.clone_for_request(...)` if available, to avoid mutating the original scheduler. + + Returns: + `(timesteps_tensor, num_inference_steps)` by default (backwards compatible), or + `(timesteps_tensor, num_inference_steps, scheduler_in_use)` if `return_scheduler=True`. + """ + # pop our optional control kwarg (keeps compatibility) + return_scheduler = bool(kwargs.pop("return_scheduler", False)) + + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + + # choose scheduler to call set_timesteps on + scheduler_in_use = scheduler + if return_scheduler: + # Do not mutate the provided scheduler: prefer to clone if possible + if hasattr(scheduler, "clone_for_request"): + try: + # clone_for_request may accept num_inference_steps or other kwargs; be permissive + scheduler_in_use = scheduler.clone_for_request( + num_inference_steps=num_inference_steps or 0, device=device + ) + except Exception: + scheduler_in_use = copy.deepcopy(scheduler) + else: + # fallback deepcopy (scheduler tends to be smallish - acceptable) + scheduler_in_use = copy.deepcopy(scheduler) + + # helper to test if set_timesteps supports a particular kwarg + def _accepts(param_name: str) -> bool: + try: + return param_name in set(inspect.signature(scheduler_in_use.set_timesteps).parameters.keys()) + except (ValueError, TypeError): + # if signature introspection fails, be permissive and attempt the call later + return False + + # now call set_timesteps on the chosen scheduler_in_use (may be original or clone) + if timesteps is not None: + accepts_timesteps = _accepts("timesteps") + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler_in_use.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. 
Please check whether you are using the correct scheduler." + ) + scheduler_in_use.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps_out = scheduler_in_use.timesteps + num_inference_steps = len(timesteps_out) + elif sigmas is not None: + accept_sigmas = _accepts("sigmas") + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler_in_use.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler_in_use.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps_out = scheduler_in_use.timesteps + num_inference_steps = len(timesteps_out) + else: + # default path + scheduler_in_use.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps_out = scheduler_in_use.timesteps + + if return_scheduler: + return timesteps_out, num_inference_steps, scheduler_in_use + return timesteps_out, num_inference_steps diff --git a/examples/server-async/utils/utils.py b/examples/server-async/utils/utils.py new file mode 100644 index 0000000000..9f94330512 --- /dev/null +++ b/examples/server-async/utils/utils.py @@ -0,0 +1,48 @@ +import gc +import logging +import os +import tempfile +import uuid + +import torch + + +logger = logging.getLogger(__name__) + + +class Utils: + def __init__(self, host: str = "0.0.0.0", port: int = 8500): + self.service_url = f"http://{host}:{port}" + self.image_dir = os.path.join(tempfile.gettempdir(), "images") + if not os.path.exists(self.image_dir): + os.makedirs(self.image_dir) + + self.video_dir = os.path.join(tempfile.gettempdir(), "videos") + if not os.path.exists(self.video_dir): + os.makedirs(self.video_dir) + + def save_image(self, image): + if hasattr(image, "to"): + try: + image = image.to("cpu") + except Exception: + pass + + if isinstance(image, torch.Tensor): + from torchvision import transforms + + to_pil = transforms.ToPILImage() + image = to_pil(image.squeeze(0).clamp(0, 1)) + + filename = "img" + str(uuid.uuid4()).split("-")[0] + ".png" + image_path = os.path.join(self.image_dir, filename) + logger.info(f"Saving image to {image_path}") + + image.save(image_path, format="PNG", optimize=True) + + del image + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + + return os.path.join(self.service_url, "images", filename) From 7e7e62c6fff46471b569cdb07b690c70514c71fc Mon Sep 17 00:00:00 2001 From: Dave Lage Date: Thu, 18 Sep 2025 03:11:22 -0400 Subject: [PATCH 71/74] Convert alphas for embedders for sd-scripts to ai toolkit conversion (#12332) * Convert alphas for embedders for sd-scripts to ai toolkit conversion * Add kohya embedders conversion test * Apply style fixes --------- Co-authored-by: Sayak Paul Co-authored-by: github-actions[bot] --- .../loaders/lora_conversion_utils.py | 86 +++++++++---------- tests/lora/test_lora_layers_flux.py | 7 ++ 2 files changed, 46 insertions(+), 47 deletions(-) diff --git a/src/diffusers/loaders/lora_conversion_utils.py b/src/diffusers/loaders/lora_conversion_utils.py index 6f584a5f0e..89afb6529a 100644 --- a/src/diffusers/loaders/lora_conversion_utils.py +++ b/src/diffusers/loaders/lora_conversion_utils.py @@ -558,70 +558,62 @@ def _convert_kohya_flux_lora_to_diffusers(state_dict): ait_sd[target_key] = value if any("guidance_in" in k for k in sds_sd): - assign_remaining_weights( - [ - ( - "time_text_embed.guidance_embedder.linear_1.{lora_key}.weight", - "lora_unet_guidance_in_in_layer.{orig_lora_key}.weight", - None, - ), - ( - 
"time_text_embed.guidance_embedder.linear_2.{lora_key}.weight", - "lora_unet_guidance_in_out_layer.{orig_lora_key}.weight", - None, - ), - ], + _convert_to_ai_toolkit( sds_sd, + ait_sd, + "lora_unet_guidance_in_in_layer", + "time_text_embed.guidance_embedder.linear_1", + ) + + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + "lora_unet_guidance_in_out_layer", + "time_text_embed.guidance_embedder.linear_2", ) if any("img_in" in k for k in sds_sd): - assign_remaining_weights( - [ - ("x_embedder.{lora_key}.weight", "lora_unet_img_in.{orig_lora_key}.weight", None), - ], + _convert_to_ai_toolkit( sds_sd, + ait_sd, + "lora_unet_img_in", + "x_embedder", ) if any("txt_in" in k for k in sds_sd): - assign_remaining_weights( - [ - ("context_embedder.{lora_key}.weight", "lora_unet_txt_in.{orig_lora_key}.weight", None), - ], + _convert_to_ai_toolkit( sds_sd, + ait_sd, + "lora_unet_txt_in", + "context_embedder", ) if any("time_in" in k for k in sds_sd): - assign_remaining_weights( - [ - ( - "time_text_embed.timestep_embedder.linear_1.{lora_key}.weight", - "lora_unet_time_in_in_layer.{orig_lora_key}.weight", - None, - ), - ( - "time_text_embed.timestep_embedder.linear_2.{lora_key}.weight", - "lora_unet_time_in_out_layer.{orig_lora_key}.weight", - None, - ), - ], + _convert_to_ai_toolkit( sds_sd, + ait_sd, + "lora_unet_time_in_in_layer", + "time_text_embed.timestep_embedder.linear_1", + ) + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + "lora_unet_time_in_out_layer", + "time_text_embed.timestep_embedder.linear_2", ) if any("vector_in" in k for k in sds_sd): - assign_remaining_weights( - [ - ( - "time_text_embed.text_embedder.linear_1.{lora_key}.weight", - "lora_unet_vector_in_in_layer.{orig_lora_key}.weight", - None, - ), - ( - "time_text_embed.text_embedder.linear_2.{lora_key}.weight", - "lora_unet_vector_in_out_layer.{orig_lora_key}.weight", - None, - ), - ], + _convert_to_ai_toolkit( sds_sd, + ait_sd, + "lora_unet_vector_in_in_layer", + "time_text_embed.text_embedder.linear_1", + ) + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + "lora_unet_vector_in_out_layer", + "time_text_embed.text_embedder.linear_2", ) if any("final_layer" in k for k in sds_sd): diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py index 7d99bcad80..e6048f509f 100644 --- a/tests/lora/test_lora_layers_flux.py +++ b/tests/lora/test_lora_layers_flux.py @@ -907,6 +907,13 @@ class FluxLoRAIntegrationTests(unittest.TestCase): assert max_diff < 1e-3 + def test_flux_kohya_embedders_conversion(self): + """Test that embedders load without throwing errors""" + self.pipeline.load_lora_weights("rockerBOO/flux-bpo-po-lora") + self.pipeline.unload_lora_weights() + + assert True + def test_flux_xlabs(self): self.pipeline.load_lora_weights("XLabs-AI/flux-lora-collection", weight_name="disney_lora.safetensors") self.pipeline.fuse_lora() From edd614ea38a7136cf66e1cb4dad696233c17358c Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Sat, 20 Sep 2025 05:31:40 +0200 Subject: [PATCH 72/74] [CI] Fix TRANSFORMERS_FLAX_WEIGHTS_NAME import issue (#12354) update --- .../pipelines/pipeline_loading_utils.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/diffusers/pipelines/pipeline_loading_utils.py b/src/diffusers/pipelines/pipeline_loading_utils.py index ee767eddcc..388128df0e 100644 --- a/src/diffusers/pipelines/pipeline_loading_utils.py +++ b/src/diffusers/pipelines/pipeline_loading_utils.py @@ -48,10 +48,12 @@ from .transformers_loading_utils import _load_tokenizer_from_dduf, _load_transfo 
if is_transformers_available(): import transformers from transformers import PreTrainedModel, PreTrainedTokenizerBase - from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME + if is_transformers_version("<=", "4.56.2"): + from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME + if is_accelerate_available(): import accelerate from accelerate import dispatch_model @@ -112,7 +114,9 @@ def is_safetensors_compatible(filenames, passed_components=None, folder_names=No ] if is_transformers_available(): - weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME] + weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME] + if is_transformers_version("<=", "4.56.2"): + weight_names += [TRANSFORMERS_FLAX_WEIGHTS_NAME] # model_pytorch, diffusion_model_pytorch, ... weight_prefixes = [w.split(".")[0] for w in weight_names] @@ -191,7 +195,9 @@ def filter_model_files(filenames): ] if is_transformers_available(): - weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME] + weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME] + if is_transformers_version("<=", "4.56.2"): + weight_names += [TRANSFORMERS_FLAX_WEIGHTS_NAME] allowed_extensions = [wn.split(".")[-1] for wn in weight_names] @@ -212,7 +218,9 @@ def variant_compatible_siblings(filenames, variant=None, ignore_patterns=None) - ] if is_transformers_available(): - weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME] + weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME] + if is_transformers_version("<=", "4.56.2"): + weight_names += [TRANSFORMERS_FLAX_WEIGHTS_NAME] # model_pytorch, diffusion_model_pytorch, ... 
weight_prefixes = [w.split(".")[0] for w in weight_names] From df267ee4e8500a2ef5960879f6d1ea49cc8ec40d Mon Sep 17 00:00:00 2001 From: naykun Date: Mon, 22 Sep 2025 00:10:52 +0800 Subject: [PATCH 73/74] feat: Add QwenImageEditPlus to support future feature upgrades (#12357) * feat: add support of qwenimageeditplus * add copies statement * fix copies statement * remove vl_processor reference --- src/diffusers/__init__.py | 2 + src/diffusers/pipelines/__init__.py | 2 + src/diffusers/pipelines/qwenimage/__init__.py | 2 + .../qwenimage/pipeline_qwenimage_edit.py | 1 - .../qwenimage/pipeline_qwenimage_edit_plus.py | 883 ++++++++++++++++++ .../dummy_torch_and_transformers_objects.py | 15 + 6 files changed, 904 insertions(+), 1 deletion(-) create mode 100644 src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_plus.py diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 167d39c6e8..741fcd14f2 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -515,6 +515,7 @@ else: "QwenImageControlNetPipeline", "QwenImageEditInpaintPipeline", "QwenImageEditPipeline", + "QwenImageEditPlusPipeline", "QwenImageImg2ImgPipeline", "QwenImageInpaintPipeline", "QwenImagePipeline", @@ -1170,6 +1171,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: QwenImageControlNetPipeline, QwenImageEditInpaintPipeline, QwenImageEditPipeline, + QwenImageEditPlusPipeline, QwenImageImg2ImgPipeline, QwenImageInpaintPipeline, QwenImagePipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 17f3fc909e..190c7871d2 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -394,6 +394,7 @@ else: "QwenImageImg2ImgPipeline", "QwenImageInpaintPipeline", "QwenImageEditPipeline", + "QwenImageEditPlusPipeline", "QwenImageEditInpaintPipeline", "QwenImageControlNetInpaintPipeline", "QwenImageControlNetPipeline", @@ -721,6 +722,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: QwenImageControlNetPipeline, QwenImageEditInpaintPipeline, QwenImageEditPipeline, + QwenImageEditPlusPipeline, QwenImageImg2ImgPipeline, QwenImageInpaintPipeline, QwenImagePipeline, diff --git a/src/diffusers/pipelines/qwenimage/__init__.py b/src/diffusers/pipelines/qwenimage/__init__.py index 36d92917fd..0cd9ab40e8 100644 --- a/src/diffusers/pipelines/qwenimage/__init__.py +++ b/src/diffusers/pipelines/qwenimage/__init__.py @@ -27,6 +27,7 @@ else: _import_structure["pipeline_qwenimage_controlnet"] = ["QwenImageControlNetPipeline"] _import_structure["pipeline_qwenimage_controlnet_inpaint"] = ["QwenImageControlNetInpaintPipeline"] _import_structure["pipeline_qwenimage_edit"] = ["QwenImageEditPipeline"] + _import_structure["pipeline_qwenimage_edit_plus"] = ["QwenImageEditPlusPipeline"] _import_structure["pipeline_qwenimage_edit_inpaint"] = ["QwenImageEditInpaintPipeline"] _import_structure["pipeline_qwenimage_img2img"] = ["QwenImageImg2ImgPipeline"] _import_structure["pipeline_qwenimage_inpaint"] = ["QwenImageInpaintPipeline"] @@ -43,6 +44,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_qwenimage_controlnet_inpaint import QwenImageControlNetInpaintPipeline from .pipeline_qwenimage_edit import QwenImageEditPipeline from .pipeline_qwenimage_edit_inpaint import QwenImageEditInpaintPipeline + from .pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline from .pipeline_qwenimage_img2img import QwenImageImg2ImgPipeline from .pipeline_qwenimage_inpaint import QwenImageInpaintPipeline else: diff --git 
a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py index 88d1ce4a46..ed37b238c8 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py @@ -208,7 +208,6 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): # QwenImage latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible # by the patch size. So the vae scale factor is multiplied by the patch size to account for this self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) - self.vl_processor = processor self.tokenizer_max_length = 1024 self.prompt_template_encode = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n" diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_plus.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_plus.py new file mode 100644 index 0000000000..ec203edf16 --- /dev/null +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_plus.py @@ -0,0 +1,883 @@ +# Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import math +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import QwenImageLoraLoaderMixin +from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import QwenImagePipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from PIL import Image + >>> from diffusers import QwenImageEditPlusPipeline + >>> from diffusers.utils import load_image + + >>> pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", torch_dtype=torch.bfloat16) + >>> pipe.to("cuda") + >>> image = load_image( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yarn-art-pikachu.png" + ... 
).convert("RGB") + >>> prompt = ( + ... "Make Pikachu hold a sign that says 'Qwen Edit is awesome', yarn art style, detailed, vibrant colors" + ... ) + >>> # Depending on the variant being used, the pipeline call will slightly vary. + >>> # Refer to the pipeline documentation for more details. + >>> image = pipe(image, prompt, num_inference_steps=50).images[0] + >>> image.save("qwenimage_edit_plus.png") + ``` +""" + +CONDITION_IMAGE_SIZE = 384 * 384 +VAE_IMAGE_SIZE = 1024 * 1024 + + +# Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.15, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + r""" + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def calculate_dimensions(target_area, ratio): + width = math.sqrt(target_area * ratio) + height = width / ratio + + width = round(width / 32) * 32 + height = round(height / 32) * 32 + + return width, height + + +class QwenImageEditPlusPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): + r""" + The Qwen-Image-Edit pipeline for image editing. + + Args: + transformer ([`QwenImageTransformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`Qwen2.5-VL-7B-Instruct`]): + [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct), specifically the + [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) variant. + tokenizer (`QwenTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). + """ + + model_cpu_offload_seq = "text_encoder->transformer->vae" + _callback_tensor_inputs = ["latents", "prompt_embeds"] + + def __init__( + self, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKLQwenImage, + text_encoder: Qwen2_5_VLForConditionalGeneration, + tokenizer: Qwen2Tokenizer, + processor: Qwen2VLProcessor, + transformer: QwenImageTransformer2DModel, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + processor=processor, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 + self.latent_channels = self.vae.config.z_dim if getattr(self, "vae", None) else 16 + # QwenImage latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible + # by the patch size. So the vae scale factor is multiplied by the patch size to account for this + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) + self.tokenizer_max_length = 1024 + + self.prompt_template_encode = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. 
Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" + self.prompt_template_encode_start_idx = 64 + self.default_sample_size = 128 + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._extract_masked_hidden + def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor): + bool_mask = mask.bool() + valid_lengths = bool_mask.sum(dim=1) + selected = hidden_states[bool_mask] + split_result = torch.split(selected, valid_lengths.tolist(), dim=0) + + return split_result + + def _get_qwen_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + image: Optional[torch.Tensor] = None, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + img_prompt_template = "Picture {}: <|vision_start|><|image_pad|><|vision_end|>" + if isinstance(image, list): + base_img_prompt = "" + for i, img in enumerate(image): + base_img_prompt += img_prompt_template.format(i + 1) + elif image is not None: + base_img_prompt = img_prompt_template.format(1) + else: + base_img_prompt = "" + + template = self.prompt_template_encode + + drop_idx = self.prompt_template_encode_start_idx + txt = [template.format(base_img_prompt + e) for e in prompt] + + model_inputs = self.processor( + text=txt, + images=image, + padding=True, + return_tensors="pt", + ).to(device) + + outputs = self.text_encoder( + input_ids=model_inputs.input_ids, + attention_mask=model_inputs.attention_mask, + pixel_values=model_inputs.pixel_values, + image_grid_thw=model_inputs.image_grid_thw, + output_hidden_states=True, + ) + + hidden_states = outputs.hidden_states[-1] + split_hidden_states = self._extract_masked_hidden(hidden_states, model_inputs.attention_mask) + split_hidden_states = [e[drop_idx:] for e in split_hidden_states] + attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in split_hidden_states] + max_seq_len = max([e.size(0) for e in split_hidden_states]) + prompt_embeds = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states] + ) + encoder_attention_mask = torch.stack( + [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list] + ) + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + return prompt_embeds, encoder_attention_mask + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_edit.QwenImageEditPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + image: Optional[torch.Tensor] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_mask: Optional[torch.Tensor] = None, + max_sequence_length: int = 1024, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + image (`torch.Tensor`, *optional*): + image to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. 
+ """ + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) if prompt_embeds is None else prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_embeds, prompt_embeds_mask = self._get_qwen_prompt_embeds(prompt, image, device) + + _, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_images_per_prompt, 1) + prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_images_per_prompt, seq_len) + + return prompt_embeds, prompt_embeds_mask + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_edit.QwenImageEditPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_embeds_mask=None, + negative_prompt_embeds_mask=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: + logger.warning( + f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and prompt_embeds_mask is None: + raise ValueError( + "If `prompt_embeds` are provided, `prompt_embeds_mask` also have to be passed. Make sure to generate `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`." + ) + if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also have to be passed. Make sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate `negative_prompt_embeds`." 
+ ) + + if max_sequence_length is not None and max_sequence_length > 1024: + raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}") + + @staticmethod + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._pack_latents + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + + return latents + + @staticmethod + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._unpack_latents + def _unpack_latents(latents, height, width, vae_scale_factor): + batch_size, num_patches, channels = latents.shape + + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. + height = 2 * (int(height) // (vae_scale_factor * 2)) + width = 2 * (int(width) // (vae_scale_factor * 2)) + + latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (2 * 2), 1, height, width) + + return latents + + # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_edit.QwenImageEditPipeline._encode_vae_image + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i], sample_mode="argmax") + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator, sample_mode="argmax") + latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, self.latent_channels, 1, 1, 1) + .to(image_latents.device, image_latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std) + .view(1, self.latent_channels, 1, 1, 1) + .to(image_latents.device, image_latents.dtype) + ) + image_latents = (image_latents - latents_mean) / latents_std + + return image_latents + + def prepare_latents( + self, + images, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. 
+ height = 2 * (int(height) // (self.vae_scale_factor * 2)) + width = 2 * (int(width) // (self.vae_scale_factor * 2)) + + shape = (batch_size, 1, num_channels_latents, height, width) + + image_latents = None + if images is not None: + if not isinstance(images, list): + images = [images] + all_image_latents = [] + for image in images: + image = image.to(device=device, dtype=dtype) + if image.shape[1] != self.latent_channels: + image_latents = self._encode_vae_image(image=image, generator=generator) + else: + image_latents = image + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." + ) + else: + image_latents = torch.cat([image_latents], dim=0) + + image_latent_height, image_latent_width = image_latents.shape[3:] + image_latents = self._pack_latents( + image_latents, batch_size, num_channels_latents, image_latent_height, image_latent_width + ) + all_image_latents.append(image_latents) + image_latents = torch.cat(all_image_latents, dim=1) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) + else: + latents = latents.to(device=device, dtype=dtype) + + return latents, image_latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def attention_kwargs(self): + return self._attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def current_timestep(self): + return self._current_timestep + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Optional[PipelineImageInput] = None, + prompt: Union[str, List[str]] = None, + negative_prompt: Union[str, List[str]] = None, + true_cfg_scale: float = 4.0, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + sigmas: Optional[List[float]] = None, + guidance_scale: Optional[float] = None, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_mask: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + ): + r""" + Function invoked when calling the pipeline for generation. 
+ + Args: + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a list + of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`. It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is + not greater than `1`). + true_cfg_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free + Diffusion Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of + equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is + enabled by setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale + encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of + lower image quality. + height (`int`, *optional*): + The height in pixels of the generated image. If not provided, it is derived from the input image's aspect ratio so that the output area is roughly 1024x1024 pixels. + width (`int`, *optional*): + The width in pixels of the generated image. If not provided, it is derived from the input image's aspect ratio so that the output area is roughly 1024x1024 pixels. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to None): + A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance + where the guidance scale is applied during inference through noise prediction rescaling, guidance + distilled models take the guidance scale directly as an input parameter during forward pass. Guidance + scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages the model to generate images + that are closely linked to the text `prompt`, usually at the expense of lower image quality. This + parameter in the pipeline is there to support future guidance-distilled models when they come up. It is + ignored when not using guidance distilled models. To enable traditional classifier-free guidance, + please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like " " should + enable classifier-free guidance computations). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.qwenimage.QwenImagePipelineOutput`] instead of a plain tuple. + attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.qwenimage.QwenImagePipelineOutput`] or `tuple`: + [`~pipelines.qwenimage.QwenImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is a list with the generated images. + """ + image_size = image[-1].size if isinstance(image, list) else image.size + calculated_width, calculated_height = calculate_dimensions(1024 * 1024, image_size[0] / image_size[1]) + height = height or calculated_height + width = width or calculated_width + + multiple_of = self.vae_scale_factor * 2 + width = width // multiple_of * multiple_of + height = height // multiple_of * multiple_of + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_embeds_mask=prompt_embeds_mask, + negative_prompt_embeds_mask=negative_prompt_embeds_mask, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._attention_kwargs = attention_kwargs + self._current_timestep = None + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # 3. Preprocess image + if image is not None and not (isinstance(image, torch.Tensor) and image.size(1) == self.latent_channels): + if not isinstance(image, list): + image = [image] + condition_image_sizes = [] + condition_images = [] + vae_image_sizes = [] + vae_images = [] + for img in image: + image_width, image_height = img.size + condition_width, condition_height = calculate_dimensions( + CONDITION_IMAGE_SIZE, image_width / image_height + ) + vae_width, vae_height = calculate_dimensions(VAE_IMAGE_SIZE, image_width / image_height) + condition_image_sizes.append((condition_width, condition_height)) + vae_image_sizes.append((vae_width, vae_height)) + condition_images.append(self.image_processor.resize(img, condition_height, condition_width)) + vae_images.append(self.image_processor.preprocess(img, vae_height, vae_width).unsqueeze(2)) + + has_neg_prompt = negative_prompt is not None or ( + negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None + ) + + if true_cfg_scale > 1 and not has_neg_prompt: + logger.warning( + f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." + ) + elif true_cfg_scale <= 1 and has_neg_prompt: + logger.warning( + " negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1" + ) + + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt + prompt_embeds, prompt_embeds_mask = self.encode_prompt( + image=condition_images, + prompt=prompt, + prompt_embeds=prompt_embeds, + prompt_embeds_mask=prompt_embeds_mask, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + if do_true_cfg: + negative_prompt_embeds, negative_prompt_embeds_mask = self.encode_prompt( + image=condition_images, + prompt=negative_prompt, + prompt_embeds=negative_prompt_embeds, + prompt_embeds_mask=negative_prompt_embeds_mask, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + # 4. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents, image_latents = self.prepare_latents( + vae_images, + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + img_shapes = [ + [ + (1, height // self.vae_scale_factor // 2, width // self.vae_scale_factor // 2), + *[ + (1, vae_height // self.vae_scale_factor // 2, vae_width // self.vae_scale_factor // 2) + for vae_width, vae_height in vae_image_sizes + ], + ] + ] * batch_size + + # 5. 
Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas + image_seq_len = latents.shape[1] + mu = calculate_shift( + image_seq_len, + self.scheduler.config.get("base_image_seq_len", 256), + self.scheduler.config.get("max_image_seq_len", 4096), + self.scheduler.config.get("base_shift", 0.5), + self.scheduler.config.get("max_shift", 1.15), + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + sigmas=sigmas, + mu=mu, + ) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # handle guidance + if self.transformer.config.guidance_embeds and guidance_scale is None: + raise ValueError("guidance_scale is required for guidance-distilled model.") + elif self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) + elif not self.transformer.config.guidance_embeds and guidance_scale is not None: + logger.warning( + f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." + ) + guidance = None + elif not self.transformer.config.guidance_embeds and guidance_scale is None: + guidance = None + + if self.attention_kwargs is None: + self._attention_kwargs = {} + + txt_seq_lens = prompt_embeds_mask.sum(dim=1).tolist() if prompt_embeds_mask is not None else None + negative_txt_seq_lens = ( + negative_prompt_embeds_mask.sum(dim=1).tolist() if negative_prompt_embeds_mask is not None else None + ) + + # 6. Denoising loop + self.scheduler.set_begin_index(0) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + self._current_timestep = t + + latent_model_input = latents + if image_latents is not None: + latent_model_input = torch.cat([latents, image_latents], dim=1) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latents.shape[0]).to(latents.dtype) + with self.transformer.cache_context("cond"): + noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep / 1000, + guidance=guidance, + encoder_hidden_states_mask=prompt_embeds_mask, + encoder_hidden_states=prompt_embeds, + img_shapes=img_shapes, + txt_seq_lens=txt_seq_lens, + attention_kwargs=self.attention_kwargs, + return_dict=False, + )[0] + noise_pred = noise_pred[:, : latents.size(1)] + + if do_true_cfg: + with self.transformer.cache_context("uncond"): + neg_noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep / 1000, + guidance=guidance, + encoder_hidden_states_mask=negative_prompt_embeds_mask, + encoder_hidden_states=negative_prompt_embeds, + img_shapes=img_shapes, + txt_seq_lens=negative_txt_seq_lens, + attention_kwargs=self.attention_kwargs, + return_dict=False, + )[0] + neg_noise_pred = neg_noise_pred[:, : latents.size(1)] + comb_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred) + + cond_norm = torch.norm(noise_pred, dim=-1, keepdim=True) + noise_norm = torch.norm(comb_pred, dim=-1, keepdim=True) + noise_pred = comb_pred * (cond_norm / noise_norm) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): 
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + self._current_timestep = None + if output_type == "latent": + image = latents + else: + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = latents.to(self.vae.dtype) + latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, self.vae.config.z_dim, 1, 1, 1) + .to(latents.device, latents.dtype) + ) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( + latents.device, latents.dtype + ) + latents = latents / latents_std + latents_mean + image = self.vae.decode(latents, return_dict=False)[0][:, :, 0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return QwenImagePipelineOutput(images=image) diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index e29be174f0..bb8fea8c8a 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -1892,6 +1892,21 @@ class QwenImageEditPipeline(metaclass=DummyObject): requires_backends(cls, ["torch", "transformers"]) +class QwenImageEditPlusPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class QwenImageImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] From c006a95df1f84f3c3c49b53926bd75c518a68fcb Mon Sep 17 00:00:00 2001 From: Jason Cox Date: Sun, 21 Sep 2025 20:07:17 -0700 Subject: [PATCH 74/74] Fix example server install instructions (#12362) * Upgrade huggingface-hub to version 0.35.0 Updated huggingface-hub version from 0.26.1 to 0.35.0. * Add uvicorn and accelerate to requirements * Fix install instructions for server --- examples/server/README.md | 4 ++-- examples/server/requirements.in | 3 ++- examples/server/requirements.txt | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index 8ad0ed3cbe..f8cd58fc1c 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -9,8 +9,8 @@ This guide will show you how to use the [`StableDiffusion3Pipeline`] in a server Start by navigating to the `examples/server` folder and installing all of the dependencies. ```py -pip install . -pip install -f requirements.txt +pip install diffusers +pip install -r requirements.txt ``` Launch the server with the following command. 
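The README hunk above stops just before the launch command itself, which lies outside the diff context. As a rough, hypothetical sketch of the kind of FastAPI application this example wraps around a diffusers pipeline (the endpoint name, payload, and model id below are assumptions and differ from the repository's actual `server.py`):

```python
# Hypothetical, minimal FastAPI + diffusers service, in the spirit of examples/server.
# Endpoint name, payload, and model id are assumptions; the real server.py differs.
import io

import torch
import uvicorn
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

from diffusers import StableDiffusion3Pipeline

app = FastAPI()
pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.bfloat16
).to("cuda")


@app.get("/generate")
def generate(prompt: str, steps: int = 28):
    # Run the pipeline and stream the resulting PNG back to the caller.
    image = pipe(prompt, num_inference_steps=steps).images[0]
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    buffer.seek(0)
    return StreamingResponse(buffer, media_type="image/png")


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
```

With an app like this saved as `server.py`, it would be started with `uvicorn server:app --host 0.0.0.0 --port 8000` or by running the file directly; the actual example server in the repository provides its own entry point.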
diff --git a/examples/server/requirements.in b/examples/server/requirements.in index a469569a10..f8c35d48cd 100644 --- a/examples/server/requirements.in +++ b/examples/server/requirements.in @@ -6,4 +6,5 @@ py-consul prometheus_client >= 0.18.0 prometheus-fastapi-instrumentator >= 7.0.0 fastapi -uvicorn \ No newline at end of file +uvicorn +accelerate diff --git a/examples/server/requirements.txt b/examples/server/requirements.txt index b91a8861a0..688a4ee94f 100644 --- a/examples/server/requirements.txt +++ b/examples/server/requirements.txt @@ -39,7 +39,7 @@ fsspec==2024.10.0 # torch h11==0.14.0 # via uvicorn -huggingface-hub==0.26.1 +huggingface-hub==0.35.0 # via # tokenizers # transformers
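Circling back to the main addition in this series, `QwenImageEditPlusPipeline` extends the edit pipeline to accept several reference images at once: each image is numbered in the text-encoder prompt ("Picture 1", "Picture 2", ...) and processed at two resolutions internally, a ~384x384-pixel target area for text-encoder conditioning and a ~1024x1024 target area for the VAE latents. A minimal multi-image usage sketch along the lines of the example docstring in the patch; the image URLs are placeholders and the exact behavior of the `Qwen/Qwen-Image-Edit-2509` checkpoint is an assumption:

```python
# Hedged sketch: multi-image editing with the new QwenImageEditPlusPipeline.
# The checkpoint id follows the example docstring in this patch; the image URLs are placeholders.
import torch

from diffusers import QwenImageEditPlusPipeline
from diffusers.utils import load_image

pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509", torch_dtype=torch.bfloat16
)
pipe.to("cuda")

# Each reference image is used twice internally: resized to a ~384x384-pixel target area
# for the "Picture N" text-encoder conditioning, and to a ~1024x1024 target area for the VAE.
images = [
    load_image("https://example.com/subject.png").convert("RGB"),          # placeholder URL
    load_image("https://example.com/style_reference.png").convert("RGB"),  # placeholder URL
]
prompt = "Place the subject from Picture 1 into the scene of Picture 2, keeping the lighting consistent"

result = pipe(
    image=images,
    prompt=prompt,
    negative_prompt=" ",  # any provided negative prompt enables true CFG when true_cfg_scale > 1
    true_cfg_scale=4.0,
    num_inference_steps=50,
    generator=torch.Generator(device="cuda").manual_seed(0),
).images[0]
result.save("qwenimage_edit_plus_multi.png")
```

Passing a negative prompt (even a blank one) together with `true_cfg_scale > 1` is what enables true classifier-free guidance in this pipeline; the separate `guidance_scale` argument is only consumed by guidance-distilled transformers (`config.guidance_embeds=True`) and is otherwise ignored.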