diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml
index 748389f373..c176f8786f 100644
--- a/docs/source/en/_toctree.yml
+++ b/docs/source/en/_toctree.yml
@@ -401,6 +401,8 @@
title: WanAnimateTransformer3DModel
- local: api/models/wan_transformer_3d
title: WanTransformer3DModel
+ - local: api/models/z_image_transformer2d
+ title: ZImageTransformer2DModel
title: Transformers
- sections:
- local: api/models/stable_cascade_unet
@@ -551,6 +553,8 @@
title: Kandinsky 2.2
- local: api/pipelines/kandinsky3
title: Kandinsky 3
+ - local: api/pipelines/kandinsky5_image
+ title: Kandinsky 5.0 Image
- local: api/pipelines/kolors
title: Kolors
- local: api/pipelines/latent_consistency_models
@@ -646,6 +650,8 @@
title: VisualCloze
- local: api/pipelines/wuerstchen
title: Wuerstchen
+ - local: api/pipelines/z_image
+ title: Z-Image
title: Image
- sections:
- local: api/pipelines/allegro
@@ -664,8 +670,6 @@
title: HunyuanVideo1.5
- local: api/pipelines/i2vgenxl
title: I2VGen-XL
- - local: api/pipelines/kandinsky5_image
- title: Kandinsky 5.0 Image
- local: api/pipelines/kandinsky5_video
title: Kandinsky 5.0 Video
- local: api/pipelines/latte
diff --git a/docs/source/en/api/cache.md b/docs/source/en/api/cache.md
index 9ba4742085..c93dcad438 100644
--- a/docs/source/en/api/cache.md
+++ b/docs/source/en/api/cache.md
@@ -34,3 +34,9 @@ Cache methods speedup diffusion transformers by storing and reusing intermediate
[[autodoc]] FirstBlockCacheConfig
[[autodoc]] apply_first_block_cache
+
+### TaylorSeerCacheConfig
+
+[[autodoc]] TaylorSeerCacheConfig
+
+[[autodoc]] apply_taylorseer_cache
diff --git a/docs/source/en/api/models/z_image_transformer2d.md b/docs/source/en/api/models/z_image_transformer2d.md
new file mode 100644
index 0000000000..2ecb9851fe
--- /dev/null
+++ b/docs/source/en/api/models/z_image_transformer2d.md
@@ -0,0 +1,19 @@
+
+
+# ZImageTransformer2DModel
+
+A Transformer model for image-like data from [Z-Image](https://huggingface.co/Tongyi-MAI/Z-Image-Turbo).
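+
+The snippet below is a minimal sketch of loading the transformer on its own; it assumes the checkpoint stores the model under a `transformer` subfolder, as is typical for Diffusers-format repositories.
+
+```python
+import torch
+
+from diffusers import ZImageTransformer2DModel
+
+transformer = ZImageTransformer2DModel.from_pretrained(
+    "Tongyi-MAI/Z-Image-Turbo", subfolder="transformer", torch_dtype=torch.bfloat16
+)
+```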
+
+## ZImageTransformer2DModel
+
+[[autodoc]] ZImageTransformer2DModel
\ No newline at end of file
diff --git a/docs/source/en/api/pipelines/kandinsky5_image.md b/docs/source/en/api/pipelines/kandinsky5_image.md
index e30a1e3ee5..1125e1594b 100644
--- a/docs/source/en/api/pipelines/kandinsky5_image.md
+++ b/docs/source/en/api/pipelines/kandinsky5_image.md
@@ -11,7 +11,7 @@ specific language governing permissions and limitations under the License.
[Kandinsky 5.0](https://arxiv.org/abs/2511.14993) is a family of diffusion models for Video & Image generation.
-Kandinsky 5.0 Image Lite is a lightweight image generation model (6B parameters)
+Kandinsky 5.0 Image Lite is a lightweight image generation model (6B parameters).
The model introduces several key innovations:
- **Latent diffusion pipeline** with **Flow Matching** for improved training stability
@@ -21,10 +21,14 @@ The model introduces several key innovations:
The original codebase can be found at [kandinskylab/Kandinsky-5](https://github.com/kandinskylab/Kandinsky-5).
+> [!TIP]
+> Check out the [Kandinsky Lab](https://huggingface.co/kandinskylab) organization on the Hub for the official model checkpoints for text-to-image generation, including pretrained, SFT, no-CFG, and distilled variants.
+
## Available Models
Kandinsky 5.0 Image Lite:
+
| model_id | Description | Use Cases |
|------------|-------------|-----------|
| [**kandinskylab/Kandinsky-5.0-T2I-Lite-sft-Diffusers**](https://huggingface.co/kandinskylab/Kandinsky-5.0-T2I-Lite-sft-Diffusers) | 6B image Supervised Fine-Tuned model | Highest generation quality |
diff --git a/docs/source/en/api/pipelines/kandinsky5_video.md b/docs/source/en/api/pipelines/kandinsky5_video.md
index d7bc76c9bf..733e248173 100644
--- a/docs/source/en/api/pipelines/kandinsky5_video.md
+++ b/docs/source/en/api/pipelines/kandinsky5_video.md
@@ -30,6 +30,7 @@ The original codebase can be found at [kandinskylab/Kandinsky-5](https://github.
## Available Models
Kandinsky 5.0 T2V Pro:
+
| model_id | Description | Use Cases |
|------------|-------------|-----------|
| **kandinskylab/Kandinsky-5.0-T2V-Pro-sft-5s-Diffusers** | 5 second Text-to-Video Pro model | High-quality text-to-video generation |
diff --git a/docs/source/en/api/pipelines/z_image.md b/docs/source/en/api/pipelines/z_image.md
new file mode 100644
index 0000000000..5175f6b0fb
--- /dev/null
+++ b/docs/source/en/api/pipelines/z_image.md
@@ -0,0 +1,66 @@
+
+
+# Z-Image
+
+[Z-Image](https://huggingface.co/papers/2511.22699) is a powerful and highly efficient image generation model with 6B parameters. Currently only one checkpoint is available, with two more to be released:
+
+|Model|Hugging Face|
+|---|---|
+|Z-Image-Turbo|https://huggingface.co/Tongyi-MAI/Z-Image-Turbo|
+
+## Z-Image-Turbo
+
+Z-Image-Turbo is a distilled version of Z-Image that matches or exceeds leading competitors with only 8 NFEs (number of function evaluations). It offers sub-second inference latency on enterprise-grade H800 GPUs and fits comfortably within 16GB of VRAM on consumer devices. It excels at photorealistic image generation, bilingual text rendering (English and Chinese), and robust instruction adherence.
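+
+## Text-to-image
+
+The snippet below is a minimal text-to-image sketch with [`ZImagePipeline`]. The settings are assumptions based on the Turbo distillation described above (a handful of steps with guidance disabled) and may need adjustment.
+
+```python
+import torch
+from diffusers import ZImagePipeline
+
+pipe = ZImagePipeline.from_pretrained("Tongyi-MAI/Z-Image-Turbo", torch_dtype=torch.bfloat16)
+pipe.to("cuda")
+
+prompt = "A photo of a corgi wearing sunglasses on a beach at sunset"
+image = pipe(
+    prompt,
+    num_inference_steps=8,
+    guidance_scale=0.0,
+    generator=torch.Generator("cuda").manual_seed(42),
+).images[0]
+image.save("zimage_t2i.png")
+```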
+
+## Image-to-image
+
+Use [`ZImageImg2ImgPipeline`] to transform an existing image based on a text prompt.
+
+```python
+import torch
+from diffusers import ZImageImg2ImgPipeline
+from diffusers.utils import load_image
+
+pipe = ZImageImg2ImgPipeline.from_pretrained("Tongyi-MAI/Z-Image-Turbo", torch_dtype=torch.bfloat16)
+pipe.to("cuda")
+
+url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+init_image = load_image(url).resize((1024, 1024))
+
+prompt = "A fantasy landscape with mountains and a river, detailed, vibrant colors"
+image = pipe(
+    prompt,
+    image=init_image,
+    strength=0.6,
+    num_inference_steps=9,
+    guidance_scale=0.0,
+    generator=torch.Generator("cuda").manual_seed(42),
+).images[0]
+image.save("zimage_img2img.png")
+```
+
+## ZImagePipeline
+
+[[autodoc]] ZImagePipeline
+ - all
+ - __call__
+
+## ZImageImg2ImgPipeline
+
+[[autodoc]] ZImageImg2ImgPipeline
+ - all
+ - __call__
diff --git a/docs/source/en/optimization/attention_backends.md b/docs/source/en/optimization/attention_backends.md
index e640c4a545..f3ff4781c6 100644
--- a/docs/source/en/optimization/attention_backends.md
+++ b/docs/source/en/optimization/attention_backends.md
@@ -32,7 +32,7 @@ This guide will show you how to set and use the different attention backends.
The [`~ModelMixin.set_attention_backend`] method iterates through all the modules in the model and sets the appropriate attention backend to use. The attention backend setting persists until [`~ModelMixin.reset_attention_backend`] is called.
-The example below demonstrates how to enable the `_flash_3_hub` implementation for FlashAttention-3 from the [kernel](https://github.com/huggingface/kernels) library, which allows you to instantly use optimized compute kernels from the Hub without requiring any setup.
+The example below demonstrates how to enable the `_flash_3_hub` implementation for FlashAttention-3 from the [`kernels`](https://github.com/huggingface/kernels) library, which allows you to instantly use optimized compute kernels from the Hub without requiring any setup.
> [!NOTE]
> FlashAttention-3 is not supported for non-Hopper architectures, in which case, use FlashAttention with `set_attention_backend("flash")`.
@@ -156,4 +156,4 @@ Refer to the table below for a complete list of available attention backends and
| `_sage_qk_int8_pv_fp16_triton` | [SageAttention](https://github.com/thu-ml/SageAttention) | INT8 QK + FP16 PV (Triton) |
| `xformers` | [xFormers](https://github.com/facebookresearch/xformers) | Memory-efficient attention |
-
\ No newline at end of file
+
diff --git a/docs/source/en/optimization/cache.md b/docs/source/en/optimization/cache.md
index 881529b27f..6397c7d4cd 100644
--- a/docs/source/en/optimization/cache.md
+++ b/docs/source/en/optimization/cache.md
@@ -66,4 +66,35 @@ config = FasterCacheConfig(
tensor_format="BFCHW",
)
pipeline.transformer.enable_cache(config)
+```
+
+## TaylorSeer Cache
+
+[TaylorSeer Cache](https://huggingface.co/papers/2503.06923) accelerates diffusion inference by using Taylor series expansions to approximate and cache intermediate activations across denoising steps. The method predicts future outputs based on past computations, reusing them at specified intervals to reduce redundant calculations.
+
+This caching mechanism delivers strong results with minimal additional memory overhead. For detailed performance analysis, see [our findings here](https://github.com/huggingface/diffusers/pull/12648#issuecomment-3610615080).
+
+To enable TaylorSeer Cache, create a [`TaylorSeerCacheConfig`] and pass it to the transformer's `enable_cache` method. The key options are:
+
+- `cache_interval`: Number of steps to reuse cached outputs before performing a full forward pass
+- `disable_cache_before_step`: Number of initial steps that always run full computation to gather data for the approximations
+- `max_order`: Order of the Taylor series approximation; higher values can improve quality in theory but increase memory usage, so we recommend setting it to `1`
+
+```python
+import torch
+from diffusers import FluxPipeline, TaylorSeerCacheConfig
+
+pipe = FluxPipeline.from_pretrained(
+ "black-forest-labs/FLUX.1-dev",
+ torch_dtype=torch.bfloat16,
+)
+pipe.to("cuda")
+
+config = TaylorSeerCacheConfig(
+    cache_interval=5,
+    max_order=1,
+    disable_cache_before_step=10,
+    taylor_factors_dtype=torch.bfloat16,
+)
+pipe.transformer.enable_cache(config)
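+
+# To restore full computation later, remove the cache hooks with:
+# pipe.transformer.disable_cache()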
```
\ No newline at end of file
diff --git a/docs/source/en/quantization/modelopt.md b/docs/source/en/quantization/modelopt.md
index 06933d47c2..c7fca9d444 100644
--- a/docs/source/en/quantization/modelopt.md
+++ b/docs/source/en/quantization/modelopt.md
@@ -11,7 +11,7 @@ specific language governing permissions and limitations under the License. -->
# NVIDIA ModelOpt
-[NVIDIA-ModelOpt](https://github.com/NVIDIA/TensorRT-Model-Optimizer) is a unified library of state-of-the-art model optimization techniques like quantization, pruning, distillation, speculative decoding, etc. It compresses deep learning models for downstream deployment frameworks like TensorRT-LLM or TensorRT to optimize inference speed.
+[NVIDIA-ModelOpt](https://github.com/NVIDIA/Model-Optimizer) is a unified library of state-of-the-art model optimization techniques like quantization, pruning, distillation, speculative decoding, etc. It compresses deep learning models for downstream deployment frameworks like TensorRT-LLM or TensorRT to optimize inference speed.
Before you begin, make sure you have nvidia_modelopt installed.
@@ -57,7 +57,7 @@ image.save("output.png")
>
> The quantization methods in NVIDIA-ModelOpt are designed to reduce the memory footprint of model weights using various QAT (Quantization-Aware Training) and PTQ (Post-Training Quantization) techniques while maintaining model performance. However, the actual performance gain during inference depends on the deployment framework (e.g., TRT-LLM, TensorRT) and the specific hardware configuration.
>
-> More details can be found [here](https://github.com/NVIDIA/TensorRT-Model-Optimizer/tree/main/examples).
+> More details can be found [here](https://github.com/NVIDIA/Model-Optimizer/tree/main/examples).
## NVIDIAModelOptConfig
@@ -86,7 +86,7 @@ The quantization methods supported are as follows:
| **NVFP4** | `nvfp4 weight only`, `nvfp4 block quantization` | `quant_type`, `quant_type + channel_quantize + block_quantize` | `channel_quantize = -1 is only supported for now`|
-Refer to the [official modelopt documentation](https://nvidia.github.io/TensorRT-Model-Optimizer/) for a better understanding of the available quantization methods and the exhaustive list of configuration options available.
+Refer to the [official modelopt documentation](https://nvidia.github.io/Model-Optimizer/) for a better understanding of the available quantization methods and the exhaustive list of configuration options available.
## Serializing and Deserializing quantized models
diff --git a/scripts/convert_hunyuan_video1_5_to_diffusers.py b/scripts/convert_hunyuan_video1_5_to_diffusers.py
index 38226f684a..89e5cdb169 100644
--- a/scripts/convert_hunyuan_video1_5_to_diffusers.py
+++ b/scripts/convert_hunyuan_video1_5_to_diffusers.py
@@ -69,6 +69,11 @@ TRANSFORMER_CONFIGS = {
"target_size": 960,
"task_type": "i2v",
},
+ "480p_i2v_step_distilled": {
+ "target_size": 640,
+ "task_type": "i2v",
+ "use_meanflow": True,
+ },
}
SCHEDULER_CONFIGS = {
@@ -93,6 +98,9 @@ SCHEDULER_CONFIGS = {
"720p_i2v_distilled": {
"shift": 7.0,
},
+ "480p_i2v_step_distilled": {
+ "shift": 7.0,
+ },
}
GUIDANCE_CONFIGS = {
@@ -117,6 +125,9 @@ GUIDANCE_CONFIGS = {
"720p_i2v_distilled": {
"guidance_scale": 1.0,
},
+ "480p_i2v_step_distilled": {
+ "guidance_scale": 1.0,
+ },
}
@@ -126,7 +137,7 @@ def swap_scale_shift(weight):
return new_weight
-def convert_hyvideo15_transformer_to_diffusers(original_state_dict):
+def convert_hyvideo15_transformer_to_diffusers(original_state_dict, config=None):
"""
Convert HunyuanVideo 1.5 original checkpoint to Diffusers format.
"""
@@ -142,6 +153,20 @@ def convert_hyvideo15_transformer_to_diffusers(original_state_dict):
)
converted_state_dict["time_embed.timestep_embedder.linear_2.bias"] = original_state_dict.pop("time_in.mlp.2.bias")
+ if config is not None and config.use_meanflow:
+ converted_state_dict["time_embed.timestep_embedder_r.linear_1.weight"] = original_state_dict.pop(
+ "time_r_in.mlp.0.weight"
+ )
+ converted_state_dict["time_embed.timestep_embedder_r.linear_1.bias"] = original_state_dict.pop(
+ "time_r_in.mlp.0.bias"
+ )
+ converted_state_dict["time_embed.timestep_embedder_r.linear_2.weight"] = original_state_dict.pop(
+ "time_r_in.mlp.2.weight"
+ )
+ converted_state_dict["time_embed.timestep_embedder_r.linear_2.bias"] = original_state_dict.pop(
+ "time_r_in.mlp.2.bias"
+ )
+
# 2. context_embedder.time_text_embed.timestep_embedder <- txt_in.t_embedder
converted_state_dict["context_embedder.time_text_embed.timestep_embedder.linear_1.weight"] = (
original_state_dict.pop("txt_in.t_embedder.mlp.0.weight")
@@ -627,7 +652,7 @@ def convert_transformer(args):
config = TRANSFORMER_CONFIGS[args.transformer_type]
with init_empty_weights():
transformer = HunyuanVideo15Transformer3DModel(**config)
- state_dict = convert_hyvideo15_transformer_to_diffusers(original_state_dict)
+ state_dict = convert_hyvideo15_transformer_to_diffusers(original_state_dict, config=transformer.config)
transformer.load_state_dict(state_dict, strict=True, assign=True)
return transformer
diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py
index eb8e86c4c8..e69d334fdb 100644
--- a/src/diffusers/__init__.py
+++ b/src/diffusers/__init__.py
@@ -169,10 +169,12 @@ else:
"LayerSkipConfig",
"PyramidAttentionBroadcastConfig",
"SmoothedEnergyGuidanceConfig",
+ "TaylorSeerCacheConfig",
"apply_faster_cache",
"apply_first_block_cache",
"apply_layer_skip",
"apply_pyramid_attention_broadcast",
+ "apply_taylorseer_cache",
]
)
_import_structure["models"].extend(
@@ -417,6 +419,8 @@ else:
"Wan22AutoBlocks",
"WanAutoBlocks",
"WanModularPipeline",
+ "ZImageAutoBlocks",
+ "ZImageModularPipeline",
]
)
_import_structure["pipelines"].extend(
@@ -660,6 +664,7 @@ else:
"WuerstchenCombinedPipeline",
"WuerstchenDecoderPipeline",
"WuerstchenPriorPipeline",
+ "ZImageImg2ImgPipeline",
"ZImagePipeline",
]
)
@@ -899,10 +904,12 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
LayerSkipConfig,
PyramidAttentionBroadcastConfig,
SmoothedEnergyGuidanceConfig,
+ TaylorSeerCacheConfig,
apply_faster_cache,
apply_first_block_cache,
apply_layer_skip,
apply_pyramid_attention_broadcast,
+ apply_taylorseer_cache,
)
from .models import (
AllegroTransformer3DModel,
@@ -1119,6 +1126,8 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
Wan22AutoBlocks,
WanAutoBlocks,
WanModularPipeline,
+ ZImageAutoBlocks,
+ ZImageModularPipeline,
)
from .pipelines import (
AllegroPipeline,
@@ -1356,6 +1365,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
WuerstchenCombinedPipeline,
WuerstchenDecoderPipeline,
WuerstchenPriorPipeline,
+ ZImageImg2ImgPipeline,
ZImagePipeline,
)
diff --git a/src/diffusers/hooks/__init__.py b/src/diffusers/hooks/__init__.py
index 524a92ea99..eb12b8a52a 100644
--- a/src/diffusers/hooks/__init__.py
+++ b/src/diffusers/hooks/__init__.py
@@ -25,3 +25,4 @@ if is_torch_available():
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
from .smoothed_energy_guidance_utils import SmoothedEnergyGuidanceConfig
+ from .taylorseer_cache import TaylorSeerCacheConfig, apply_taylorseer_cache
diff --git a/src/diffusers/hooks/group_offloading.py b/src/diffusers/hooks/group_offloading.py
index 11b8dfd152..47f1f41996 100644
--- a/src/diffusers/hooks/group_offloading.py
+++ b/src/diffusers/hooks/group_offloading.py
@@ -15,7 +15,7 @@
import hashlib
import os
from contextlib import contextmanager, nullcontext
-from dataclasses import dataclass
+from dataclasses import dataclass, replace
from enum import Enum
from typing import Dict, List, Optional, Set, Tuple, Union
@@ -59,6 +59,9 @@ class GroupOffloadingConfig:
num_blocks_per_group: Optional[int] = None
offload_to_disk_path: Optional[str] = None
stream: Optional[Union[torch.cuda.Stream, torch.Stream]] = None
+ block_modules: Optional[List[str]] = None
+ exclude_kwargs: Optional[List[str]] = None
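+ # Prefix prepended to group ids when recursing into `block_modules`, so disk-offload filenames
+ # stay unique across submodules that share the same class name.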
+ module_prefix: str = ""
class ModuleGroup:
@@ -77,7 +80,7 @@ class ModuleGroup:
low_cpu_mem_usage: bool = False,
onload_self: bool = True,
offload_to_disk_path: Optional[str] = None,
- group_id: Optional[int] = None,
+ group_id: Optional[Union[int, str]] = None,
) -> None:
self.modules = modules
self.offload_device = offload_device
@@ -322,7 +325,21 @@ class GroupOffloadingHook(ModelHook):
self.group.stream.synchronize()
args = send_to_device(args, self.group.onload_device, non_blocking=self.group.non_blocking)
- kwargs = send_to_device(kwargs, self.group.onload_device, non_blocking=self.group.non_blocking)
+
+ # Some Autoencoder models use a feature cache that is passed through submodules
+ # and modified in place. The `send_to_device` call returns a copy of this feature cache object
+ # which breaks the in-place updates. Use `exclude_kwargs` to exclude such entries from device transfer.
+ exclude_kwargs = self.config.exclude_kwargs or []
+ if exclude_kwargs:
+ moved_kwargs = send_to_device(
+ {k: v for k, v in kwargs.items() if k not in exclude_kwargs},
+ self.group.onload_device,
+ non_blocking=self.group.non_blocking,
+ )
+ kwargs.update(moved_kwargs)
+ else:
+ kwargs = send_to_device(kwargs, self.group.onload_device, non_blocking=self.group.non_blocking)
+
return args, kwargs
def post_forward(self, module: torch.nn.Module, output):
@@ -455,6 +472,8 @@ def apply_group_offloading(
record_stream: bool = False,
low_cpu_mem_usage: bool = False,
offload_to_disk_path: Optional[str] = None,
+ block_modules: Optional[List[str]] = None,
+ exclude_kwargs: Optional[List[str]] = None,
) -> None:
r"""
Applies group offloading to the internal layers of a torch.nn.Module. To understand what group offloading is, and
@@ -512,6 +531,13 @@ def apply_group_offloading(
If True, the CPU memory usage is minimized by pinning tensors on-the-fly instead of pre-pinning them. This
option only matters when using streamed CPU offloading (i.e. `use_stream=True`). This can be useful when
the CPU memory is a bottleneck but may counteract the benefits of using streams.
+ block_modules (`List[str]`, *optional*):
+ List of module names that should be treated as blocks for offloading. If provided, only these modules will
+ be considered for block-level offloading. If not provided, the default block detection logic will be used.
+ exclude_kwargs (`List[str]`, *optional*):
+ List of kwarg keys that should not be processed by send_to_device. This is useful for mutable state like
+ caching lists that need to maintain their object identity across forward passes. If not provided, will be
+ inferred from the module's `_skip_keys` attribute if it exists.
Example:
```python
@@ -553,6 +579,12 @@ def apply_group_offloading(
_raise_error_if_accelerate_model_or_sequential_hook_present(module)
+ if block_modules is None:
+ block_modules = getattr(module, "_group_offload_block_modules", None)
+
+ if exclude_kwargs is None:
+ exclude_kwargs = getattr(module, "_skip_keys", None)
+
config = GroupOffloadingConfig(
onload_device=onload_device,
offload_device=offload_device,
@@ -563,6 +595,8 @@ def apply_group_offloading(
record_stream=record_stream,
low_cpu_mem_usage=low_cpu_mem_usage,
offload_to_disk_path=offload_to_disk_path,
+ block_modules=block_modules,
+ exclude_kwargs=exclude_kwargs,
)
_apply_group_offloading(module, config)
@@ -578,46 +612,66 @@ def _apply_group_offloading(module: torch.nn.Module, config: GroupOffloadingConf
def _apply_group_offloading_block_level(module: torch.nn.Module, config: GroupOffloadingConfig) -> None:
r"""
- This function applies offloading to groups of torch.nn.ModuleList or torch.nn.Sequential blocks. In comparison to
- the "leaf_level" offloading, which is more fine-grained, this offloading is done at the top-level blocks.
- """
+ This function applies offloading to groups of torch.nn.ModuleList or torch.nn.Sequential blocks, and explicitly
+ defined block modules. In comparison to the "leaf_level" offloading, which is more fine-grained, this offloading is
+ done at the top-level blocks and modules specified in block_modules.
+ When block_modules is provided, only those modules are treated as blocks for offloading, and block-level
+ offloading is applied to each of them recursively.
+ """
if config.stream is not None and config.num_blocks_per_group != 1:
logger.warning(
f"Using streams is only supported for num_blocks_per_group=1. Got {config.num_blocks_per_group=}. Setting it to 1."
)
config.num_blocks_per_group = 1
- # Create module groups for ModuleList and Sequential blocks
+ block_modules = set(config.block_modules) if config.block_modules is not None else set()
+
+ # Create module groups for ModuleList and Sequential blocks, and explicitly defined block modules
modules_with_group_offloading = set()
unmatched_modules = []
matched_module_groups = []
- for name, submodule in module.named_children():
- if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)):
- unmatched_modules.append((name, submodule))
- modules_with_group_offloading.add(name)
- continue
- for i in range(0, len(submodule), config.num_blocks_per_group):
- current_modules = submodule[i : i + config.num_blocks_per_group]
- group_id = f"{name}_{i}_{i + len(current_modules) - 1}"
- group = ModuleGroup(
- modules=current_modules,
- offload_device=config.offload_device,
- onload_device=config.onload_device,
- offload_to_disk_path=config.offload_to_disk_path,
- offload_leader=current_modules[-1],
- onload_leader=current_modules[0],
- non_blocking=config.non_blocking,
- stream=config.stream,
- record_stream=config.record_stream,
- low_cpu_mem_usage=config.low_cpu_mem_usage,
- onload_self=True,
- group_id=group_id,
- )
- matched_module_groups.append(group)
- for j in range(i, i + len(current_modules)):
- modules_with_group_offloading.add(f"{name}.{j}")
+ for name, submodule in module.named_children():
+ # Check if this is an explicitly defined block module
+ if name in block_modules:
+ # Track submodule using a prefix to avoid filename collisions during disk offload.
+ # Without this, submodules sharing the same model class would be assigned identical
+ # filenames (derived from the class name).
+ prefix = f"{config.module_prefix}{name}." if config.module_prefix else f"{name}."
+ submodule_config = replace(config, module_prefix=prefix)
+
+ _apply_group_offloading_block_level(submodule, submodule_config)
+ modules_with_group_offloading.add(name)
+
+ elif isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)):
+ # Handle ModuleList and Sequential blocks as before
+ for i in range(0, len(submodule), config.num_blocks_per_group):
+ current_modules = list(submodule[i : i + config.num_blocks_per_group])
+ if len(current_modules) == 0:
+ continue
+
+ group_id = f"{config.module_prefix}{name}_{i}_{i + len(current_modules) - 1}"
+ group = ModuleGroup(
+ modules=current_modules,
+ offload_device=config.offload_device,
+ onload_device=config.onload_device,
+ offload_to_disk_path=config.offload_to_disk_path,
+ offload_leader=current_modules[-1],
+ onload_leader=current_modules[0],
+ non_blocking=config.non_blocking,
+ stream=config.stream,
+ record_stream=config.record_stream,
+ low_cpu_mem_usage=config.low_cpu_mem_usage,
+ onload_self=True,
+ group_id=group_id,
+ )
+ matched_module_groups.append(group)
+ for j in range(i, i + len(current_modules)):
+ modules_with_group_offloading.add(f"{name}.{j}")
+ else:
+ # This is an unmatched module
+ unmatched_modules.append((name, submodule))
# Apply group offloading hooks to the module groups
for i, group in enumerate(matched_module_groups):
@@ -632,28 +686,29 @@ def _apply_group_offloading_block_level(module: torch.nn.Module, config: GroupOf
parameters = [param for _, param in parameters]
buffers = [buffer for _, buffer in buffers]
- # Create a group for the unmatched submodules of the top-level module so that they are on the correct
- # device when the forward pass is called.
+ # Create a group for the remaining unmatched submodules of the top-level
+ # module so that they are on the correct device when the forward pass is called.
unmatched_modules = [unmatched_module for _, unmatched_module in unmatched_modules]
- unmatched_group = ModuleGroup(
- modules=unmatched_modules,
- offload_device=config.offload_device,
- onload_device=config.onload_device,
- offload_to_disk_path=config.offload_to_disk_path,
- offload_leader=module,
- onload_leader=module,
- parameters=parameters,
- buffers=buffers,
- non_blocking=False,
- stream=None,
- record_stream=False,
- onload_self=True,
- group_id=f"{module.__class__.__name__}_unmatched_group",
- )
- if config.stream is None:
- _apply_group_offloading_hook(module, unmatched_group, config=config)
- else:
- _apply_lazy_group_offloading_hook(module, unmatched_group, config=config)
+ if len(unmatched_modules) > 0 or len(parameters) > 0 or len(buffers) > 0:
+ unmatched_group = ModuleGroup(
+ modules=unmatched_modules,
+ offload_device=config.offload_device,
+ onload_device=config.onload_device,
+ offload_to_disk_path=config.offload_to_disk_path,
+ offload_leader=module,
+ onload_leader=module,
+ parameters=parameters,
+ buffers=buffers,
+ non_blocking=False,
+ stream=None,
+ record_stream=False,
+ onload_self=True,
+ group_id=f"{config.module_prefix}{module.__class__.__name__}_unmatched_group",
+ )
+ if config.stream is None:
+ _apply_group_offloading_hook(module, unmatched_group, config=config)
+ else:
+ _apply_lazy_group_offloading_hook(module, unmatched_group, config=config)
def _apply_group_offloading_leaf_level(module: torch.nn.Module, config: GroupOffloadingConfig) -> None:
diff --git a/src/diffusers/hooks/taylorseer_cache.py b/src/diffusers/hooks/taylorseer_cache.py
new file mode 100644
index 0000000000..7cad9f4fa1
--- /dev/null
+++ b/src/diffusers/hooks/taylorseer_cache.py
@@ -0,0 +1,346 @@
+import math
+import re
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Tuple
+
+import torch
+import torch.nn as nn
+
+from ..utils import logging
+from .hooks import HookRegistry, ModelHook, StateManager
+
+
+logger = logging.get_logger(__name__)
+_TAYLORSEER_CACHE_HOOK = "taylorseer_cache"
+_SPATIAL_ATTENTION_BLOCK_IDENTIFIERS = (
+ "^blocks.*attn",
+ "^transformer_blocks.*attn",
+ "^single_transformer_blocks.*attn",
+)
+_TEMPORAL_ATTENTION_BLOCK_IDENTIFIERS = ("^temporal_transformer_blocks.*attn",)
+_TRANSFORMER_BLOCK_IDENTIFIERS = _SPATIAL_ATTENTION_BLOCK_IDENTIFIERS + _TEMPORAL_ATTENTION_BLOCK_IDENTIFIERS
+_BLOCK_IDENTIFIERS = ("^[^.]*block[^.]*\\.[^.]+$",)
+_PROJ_OUT_IDENTIFIERS = ("^proj_out$",)
+
+
+@dataclass
+class TaylorSeerCacheConfig:
+ """
+ Configuration for TaylorSeer cache. See: https://huggingface.co/papers/2503.06923
+
+ Attributes:
+ cache_interval (`int`, defaults to `5`):
+ The interval between full computation steps. After a full computation, the cached (predicted) outputs are
+ reused for this many subsequent denoising steps before refreshing with a new full forward pass.
+
+ disable_cache_before_step (`int`, defaults to `3`):
+ The denoising step index before which caching is disabled, meaning full computation is performed for the
+ initial steps (0 to disable_cache_before_step - 1) to gather data for Taylor series approximations. During
+ these steps, Taylor factors are updated, but caching/predictions are not applied. Caching begins at this
+ step.
+
+ disable_cache_after_step (`int`, *optional*, defaults to `None`):
+ The denoising step index after which caching is disabled. If set, for steps >= this value, all modules run
+ full computations without predictions or state updates, ensuring accuracy in later stages if needed.
+
+ max_order (`int`, defaults to `1`):
+ The highest order in the Taylor series expansion for approximating module outputs. Higher orders provide
+ better approximations but increase computation and memory usage.
+
+ taylor_factors_dtype (`torch.dtype`, defaults to `torch.bfloat16`):
+ Data type used for storing and computing Taylor series factors. Lower precision reduces memory but may
+ affect stability; higher precision improves accuracy at the cost of more memory.
+
+ skip_predict_identifiers (`List[str]`, *optional*, defaults to `None`):
+ Regex patterns (using `re.fullmatch`) for module names to place in "skip" mode. In this mode, the module
+ computes fully during initial or refresh steps but returns a zero tensor (matching the recorded shape)
+ during prediction steps to skip computation cheaply.
+
+ cache_identifiers (`List[str]`, *optional*, defaults to `None`):
+ Regex patterns (using `re.fullmatch`) for module names to place in Taylor-series caching mode, where
+ outputs are approximated and cached for reuse.
+
+ use_lite_mode (`bool`, *optional*, defaults to `False`):
+ Enables a lightweight TaylorSeer variant that minimizes memory usage by applying predefined patterns for
+ skipping and caching (e.g., skipping blocks and caching projections). This overrides any custom
+ `skip_predict_identifiers` or `cache_identifiers`.
+
+ Notes:
+ - Patterns are matched using `re.fullmatch` on the module name.
+ - If `skip_predict_identifiers` or `cache_identifiers` are provided, only matching modules are hooked.
+ - If neither is provided, all attention-like modules are hooked by default.
+
+ Example of inactive and active usage:
+
+ ```py
+ def forward(x):
+ x = self.module1(x) # inactive module: returns zeros tensor based on shape recorded during full compute
+ x = self.module2(x) # active module: caches output here, avoiding recomputation of prior steps
+ return x
+ ```
+ """
+
+ cache_interval: int = 5
+ disable_cache_before_step: int = 3
+ disable_cache_after_step: Optional[int] = None
+ max_order: int = 1
+ taylor_factors_dtype: Optional[torch.dtype] = torch.bfloat16
+ skip_predict_identifiers: Optional[List[str]] = None
+ cache_identifiers: Optional[List[str]] = None
+ use_lite_mode: bool = False
+
+ def __repr__(self) -> str:
+ return (
+ "TaylorSeerCacheConfig("
+ f"cache_interval={self.cache_interval}, "
+ f"disable_cache_before_step={self.disable_cache_before_step}, "
+ f"disable_cache_after_step={self.disable_cache_after_step}, "
+ f"max_order={self.max_order}, "
+ f"taylor_factors_dtype={self.taylor_factors_dtype}, "
+ f"skip_predict_identifiers={self.skip_predict_identifiers}, "
+ f"cache_identifiers={self.cache_identifiers}, "
+ f"use_lite_mode={self.use_lite_mode})"
+ )
+
+
+class TaylorSeerState:
+ def __init__(
+ self,
+ taylor_factors_dtype: Optional[torch.dtype] = torch.bfloat16,
+ max_order: int = 1,
+ is_inactive: bool = False,
+ ):
+ self.taylor_factors_dtype = taylor_factors_dtype
+ self.max_order = max_order
+ self.is_inactive = is_inactive
+
+ self.module_dtypes: Tuple[torch.dtype, ...] = ()
+ self.last_update_step: Optional[int] = None
+ self.taylor_factors: Dict[int, Dict[int, torch.Tensor]] = {}
+ self.inactive_shapes: Optional[Tuple[Tuple[int, ...], ...]] = None
+ self.device: Optional[torch.device] = None
+ self.current_step: int = -1
+
+ def reset(self) -> None:
+ self.current_step = -1
+ self.last_update_step = None
+ self.taylor_factors = {}
+ self.inactive_shapes = None
+ self.device = None
+
+ def update(
+ self,
+ outputs: Tuple[torch.Tensor, ...],
+ ) -> None:
+ self.module_dtypes = tuple(output.dtype for output in outputs)
+ self.device = outputs[0].device
+
+ if self.is_inactive:
+ self.inactive_shapes = tuple(output.shape for output in outputs)
+ else:
+ for i, features in enumerate(outputs):
+ new_factors: Dict[int, torch.Tensor] = {0: features}
+ is_first_update = self.last_update_step is None
+ if not is_first_update:
+ delta_step = self.current_step - self.last_update_step
+ if delta_step == 0:
+ raise ValueError("Delta step cannot be zero for TaylorSeer update.")
+
+ # Recursive divided differences up to max_order
+ prev_factors = self.taylor_factors.get(i, {})
+ for j in range(self.max_order):
+ prev = prev_factors.get(j)
+ if prev is None:
+ break
+ new_factors[j + 1] = (new_factors[j] - prev.to(features.dtype)) / delta_step
+ self.taylor_factors[i] = {
+ order: factor.to(self.taylor_factors_dtype) for order, factor in new_factors.items()
+ }
+
+ self.last_update_step = self.current_step
+
+ @torch.compiler.disable
+ def predict(self) -> List[torch.Tensor]:
+ if self.last_update_step is None:
+ raise ValueError("Cannot predict without prior initialization/update.")
+
+ step_offset = self.current_step - self.last_update_step
+
+ outputs = []
+ if self.is_inactive:
+ if self.inactive_shapes is None:
+ raise ValueError("Inactive shapes not set during prediction.")
+ for i in range(len(self.module_dtypes)):
+ outputs.append(
+ torch.zeros(
+ self.inactive_shapes[i],
+ dtype=self.module_dtypes[i],
+ device=self.device,
+ )
+ )
+ else:
+ if not self.taylor_factors:
+ raise ValueError("Taylor factors empty during prediction.")
+ num_outputs = len(self.taylor_factors)
+ num_orders = len(self.taylor_factors[0])
+ for i in range(num_outputs):
+ output_dtype = self.module_dtypes[i]
+ taylor_factors = self.taylor_factors[i]
+ output = torch.zeros_like(taylor_factors[0], dtype=output_dtype)
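+ # Evaluate the truncated Taylor expansion around the last full computation:
+ # output = sum_k taylor_factors[k] * step_offset**k / k!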
+ for order in range(num_orders):
+ coeff = (step_offset**order) / math.factorial(order)
+ factor = taylor_factors[order]
+ output = output + factor.to(output_dtype) * coeff
+ outputs.append(output)
+ return outputs
+
+
+class TaylorSeerCacheHook(ModelHook):
+ _is_stateful = True
+
+ def __init__(
+ self,
+ cache_interval: int,
+ disable_cache_before_step: int,
+ taylor_factors_dtype: torch.dtype,
+ state_manager: StateManager,
+ disable_cache_after_step: Optional[int] = None,
+ ):
+ super().__init__()
+ self.cache_interval = cache_interval
+ self.disable_cache_before_step = disable_cache_before_step
+ self.disable_cache_after_step = disable_cache_after_step
+ self.taylor_factors_dtype = taylor_factors_dtype
+ self.state_manager = state_manager
+
+ def initialize_hook(self, module: torch.nn.Module):
+ return module
+
+ def reset_state(self, module: torch.nn.Module) -> None:
+ """
+ Reset state between sampling runs.
+ """
+ self.state_manager.reset()
+
+ @torch.compiler.disable
+ def _measure_should_compute(self) -> Tuple[bool, TaylorSeerState]:
+ state: TaylorSeerState = self.state_manager.get_state()
+ state.current_step += 1
+ current_step = state.current_step
+ is_warmup_phase = current_step < self.disable_cache_before_step
+ is_compute_interval = (current_step - self.disable_cache_before_step - 1) % self.cache_interval == 0
+ is_cooldown_phase = self.disable_cache_after_step is not None and current_step >= self.disable_cache_after_step
+ should_compute = is_warmup_phase or is_compute_interval or is_cooldown_phase
+ return should_compute, state
+
+ def new_forward(self, module: torch.nn.Module, *args, **kwargs):
+ should_compute, state = self._measure_should_compute()
+ if should_compute:
+ outputs = self.fn_ref.original_forward(*args, **kwargs)
+ wrapped_outputs = (outputs,) if isinstance(outputs, torch.Tensor) else outputs
+ state.update(wrapped_outputs)
+ return outputs
+
+ outputs_list = state.predict()
+ return outputs_list[0] if len(outputs_list) == 1 else tuple(outputs_list)
+
+
+def _resolve_patterns(config: TaylorSeerCacheConfig) -> Tuple[List[str], List[str]]:
+ """
+ Resolve the effective inactive and active pattern lists from the config.
+ """
+
+ inactive_patterns = config.skip_predict_identifiers or []
+ active_patterns = config.cache_identifiers or []
+
+ return inactive_patterns, active_patterns
+
+
+def apply_taylorseer_cache(module: torch.nn.Module, config: TaylorSeerCacheConfig):
+ """
+ Applies the TaylorSeer cache to a given pipeline (typically the transformer / UNet).
+
+ This function hooks selected modules in the model to enable caching or skipping based on the provided
+ configuration, reducing redundant computations in diffusion denoising loops.
+
+ Args:
+ module (torch.nn.Module): The model subtree to apply the hooks to.
+ config (TaylorSeerCacheConfig): Configuration for the cache.
+
+ Example:
+ ```python
+ >>> import torch
+ >>> from diffusers import FluxPipeline, TaylorSeerCacheConfig
+
+ >>> pipe = FluxPipeline.from_pretrained(
+ ... "black-forest-labs/FLUX.1-dev",
+ ... torch_dtype=torch.bfloat16,
+ ... )
+ >>> pipe.to("cuda")
+
+ >>> config = TaylorSeerCacheConfig(
+ ... cache_interval=5,
+ ... max_order=1,
+ ... disable_cache_before_step=3,
+ ... taylor_factors_dtype=torch.float32,
+ ... )
+ >>> pipe.transformer.enable_cache(config)
+ ```
+ """
+ inactive_patterns, active_patterns = _resolve_patterns(config)
+
+ active_patterns = active_patterns or _TRANSFORMER_BLOCK_IDENTIFIERS
+
+ if config.use_lite_mode:
+ logger.info("Using TaylorSeer Lite variant for cache.")
+ active_patterns = _PROJ_OUT_IDENTIFIERS
+ inactive_patterns = _BLOCK_IDENTIFIERS
+ if config.skip_predict_identifiers or config.cache_identifiers:
+ logger.warning("Lite mode overrides user patterns.")
+
+ for name, submodule in module.named_modules():
+ matches_inactive = any(re.fullmatch(pattern, name) for pattern in inactive_patterns)
+ matches_active = any(re.fullmatch(pattern, name) for pattern in active_patterns)
+ if not (matches_inactive or matches_active):
+ continue
+ _apply_taylorseer_cache_hook(
+ module=submodule,
+ config=config,
+ is_inactive=matches_inactive,
+ )
+
+
+def _apply_taylorseer_cache_hook(
+ module: nn.Module,
+ config: TaylorSeerCacheConfig,
+ is_inactive: bool,
+):
+ """
+ Registers the TaylorSeer hook on the specified nn.Module.
+
+ Args:
+ name: Name of the module.
+ module: The nn.Module to be hooked.
+ config: Cache configuration.
+ is_inactive: Whether this module should operate in "inactive" mode.
+ """
+ state_manager = StateManager(
+ TaylorSeerState,
+ init_kwargs={
+ "taylor_factors_dtype": config.taylor_factors_dtype,
+ "max_order": config.max_order,
+ "is_inactive": is_inactive,
+ },
+ )
+
+ registry = HookRegistry.check_if_exists_or_initialize(module)
+
+ hook = TaylorSeerCacheHook(
+ cache_interval=config.cache_interval,
+ disable_cache_before_step=config.disable_cache_before_step,
+ taylor_factors_dtype=config.taylor_factors_dtype,
+ disable_cache_after_step=config.disable_cache_after_step,
+ state_manager=state_manager,
+ )
+
+ registry.register_hook(hook, _TAYLORSEER_CACHE_HOOK)
diff --git a/src/diffusers/loaders/lora_conversion_utils.py b/src/diffusers/loaders/lora_conversion_utils.py
index f3c17cd729..2e87f757c3 100644
--- a/src/diffusers/loaders/lora_conversion_utils.py
+++ b/src/diffusers/loaders/lora_conversion_utils.py
@@ -2417,6 +2417,17 @@ def _convert_non_diffusers_z_image_lora_to_diffusers(state_dict):
state_dict = {convert_key(k): v for k, v in state_dict.items()}
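+ # Remap bare attention output keys (".out" before ".lora_down.weight", ".lora_up.weight", or ".alpha") to the diffusers ".to_out.0" naming.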
+ def normalize_out_key(k: str) -> str:
+ if ".to_out" in k:
+ return k
+ return re.sub(
+ r"\.out(?=\.(?:lora_down|lora_up)\.weight$|\.alpha$)",
+ ".to_out.0",
+ k,
+ )
+
+ state_dict = {normalize_out_key(k): v for k, v in state_dict.items()}
+
has_default = any("default." in k for k in state_dict)
if has_default:
state_dict = {k.replace("default.", ""): v for k, v in state_dict.items()}
diff --git a/src/diffusers/loaders/single_file_model.py b/src/diffusers/loaders/single_file_model.py
index 7b581ac3eb..803fdfc2d9 100644
--- a/src/diffusers/loaders/single_file_model.py
+++ b/src/diffusers/loaders/single_file_model.py
@@ -49,6 +49,7 @@ from .single_file_utils import (
convert_stable_cascade_unet_single_file_to_diffusers,
convert_wan_transformer_to_diffusers,
convert_wan_vae_to_diffusers,
+ convert_z_image_transformer_checkpoint_to_diffusers,
create_controlnet_diffusers_config_from_ldm,
create_unet_diffusers_config_from_ldm,
create_vae_diffusers_config_from_ldm,
@@ -167,6 +168,10 @@ SINGLE_FILE_LOADABLE_CLASSES = {
"checkpoint_mapping_fn": convert_flux2_transformer_checkpoint_to_diffusers,
"default_subfolder": "transformer",
},
+ "ZImageTransformer2DModel": {
+ "checkpoint_mapping_fn": convert_z_image_transformer_checkpoint_to_diffusers,
+ "default_subfolder": "transformer",
+ },
}
diff --git a/src/diffusers/loaders/single_file_utils.py b/src/diffusers/loaders/single_file_utils.py
index 1e94049334..b866a5a21a 100644
--- a/src/diffusers/loaders/single_file_utils.py
+++ b/src/diffusers/loaders/single_file_utils.py
@@ -120,6 +120,7 @@ CHECKPOINT_KEY_NAMES = {
"hunyuan-video": "txt_in.individual_token_refiner.blocks.0.adaLN_modulation.1.bias",
"instruct-pix2pix": "model.diffusion_model.input_blocks.0.0.weight",
"lumina2": ["model.diffusion_model.cap_embedder.0.weight", "cap_embedder.0.weight"],
+ "z-image-turbo": "cap_embedder.0.weight",
"sana": [
"blocks.0.cross_attn.q_linear.weight",
"blocks.0.cross_attn.q_linear.bias",
@@ -218,6 +219,7 @@ DIFFUSERS_DEFAULT_PIPELINE_PATHS = {
"cosmos-2.0-t2i-14B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-14B-Text2Image"},
"cosmos-2.0-v2w-2B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-2B-Video2World"},
"cosmos-2.0-v2w-14B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-14B-Video2World"},
+ "z-image-turbo": {"pretrained_model_name_or_path": "Tongyi-MAI/Z-Image-Turbo"},
}
# Use to configure model sample size when original config is provided
@@ -721,6 +723,12 @@ def infer_diffusers_model_type(checkpoint):
):
model_type = "instruct-pix2pix"
+ elif (
+ CHECKPOINT_KEY_NAMES["z-image-turbo"] in checkpoint
+ and checkpoint[CHECKPOINT_KEY_NAMES["z-image-turbo"]].shape[0] == 2560
+ ):
+ model_type = "z-image-turbo"
+
elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["lumina2"]):
model_type = "lumina2"
@@ -3824,3 +3832,56 @@ def convert_flux2_transformer_checkpoint_to_diffusers(checkpoint, **kwargs):
handler_fn_inplace(key, converted_state_dict)
return converted_state_dict
+
+
+def convert_z_image_transformer_checkpoint_to_diffusers(checkpoint, **kwargs):
+ Z_IMAGE_KEYS_RENAME_DICT = {
+ "final_layer.": "all_final_layer.2-1.",
+ "x_embedder.": "all_x_embedder.2-1.",
+ ".attention.out.bias": ".attention.to_out.0.bias",
+ ".attention.k_norm.weight": ".attention.norm_k.weight",
+ ".attention.q_norm.weight": ".attention.norm_q.weight",
+ ".attention.out.weight": ".attention.to_out.0.weight",
+ }
+
+ def convert_z_image_fused_attention(key: str, state_dict: dict[str, object]) -> None:
+ if ".attention.qkv.weight" not in key:
+ return
+
+ fused_qkv_weight = state_dict.pop(key)
+ to_q_weight, to_k_weight, to_v_weight = torch.chunk(fused_qkv_weight, 3, dim=0)
+ new_q_name = key.replace(".attention.qkv.weight", ".attention.to_q.weight")
+ new_k_name = key.replace(".attention.qkv.weight", ".attention.to_k.weight")
+ new_v_name = key.replace(".attention.qkv.weight", ".attention.to_v.weight")
+
+ state_dict[new_q_name] = to_q_weight
+ state_dict[new_k_name] = to_k_weight
+ state_dict[new_v_name] = to_v_weight
+ return
+
+ TRANSFORMER_SPECIAL_KEYS_REMAP = {
+ ".attention.qkv.weight": convert_z_image_fused_attention,
+ }
+
+ def update_state_dict(state_dict: dict[str, object], old_key: str, new_key: str) -> None:
+ state_dict[new_key] = state_dict.pop(old_key)
+
+ converted_state_dict = {key: checkpoint.pop(key) for key in list(checkpoint.keys())}
+
+ # Handle single file --> diffusers key remapping via the remap dict
+ for key in list(converted_state_dict.keys()):
+ new_key = key[:]
+ for replace_key, rename_key in Z_IMAGE_KEYS_RENAME_DICT.items():
+ new_key = new_key.replace(replace_key, rename_key)
+
+ update_state_dict(converted_state_dict, key, new_key)
+
+ # Handle any special logic which can't be expressed by a simple 1:1 remapping with the handlers in
+ # special_keys_remap
+ for key in list(converted_state_dict.keys()):
+ for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items():
+ if special_key not in key:
+ continue
+ handler_fn_inplace(key, converted_state_dict)
+
+ return converted_state_dict
diff --git a/src/diffusers/models/autoencoders/autoencoder_kl.py b/src/diffusers/models/autoencoders/autoencoder_kl.py
index 265f2abcfb..95991dca33 100644
--- a/src/diffusers/models/autoencoders/autoencoder_kl.py
+++ b/src/diffusers/models/autoencoders/autoencoder_kl.py
@@ -74,6 +74,7 @@ class AutoencoderKL(
_supports_gradient_checkpointing = True
_no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D"]
+ _group_offload_block_modules = ["quant_conv", "post_quant_conv", "encoder", "decoder"]
@register_to_config
def __init__(
diff --git a/src/diffusers/models/autoencoders/autoencoder_kl_wan.py b/src/diffusers/models/autoencoders/autoencoder_kl_wan.py
index b0b2960aaf..761dff2dc6 100644
--- a/src/diffusers/models/autoencoders/autoencoder_kl_wan.py
+++ b/src/diffusers/models/autoencoders/autoencoder_kl_wan.py
@@ -619,6 +619,7 @@ class WanEncoder3d(nn.Module):
feat_idx[0] += 1
else:
x = self.conv_out(x)
+
return x
@@ -961,6 +962,7 @@ class AutoencoderKLWan(ModelMixin, AutoencoderMixin, ConfigMixin, FromOriginalMo
"""
_supports_gradient_checkpointing = False
+ _group_offload_block_modules = ["quant_conv", "post_quant_conv", "encoder", "decoder"]
# keys toignore when AlignDeviceHook moves inputs/outputs between devices
# these are shared mutable state modified in-place
_skip_keys = ["feat_cache", "feat_idx"]
@@ -1259,14 +1261,20 @@ class AutoencoderKLWan(ModelMixin, AutoencoderMixin, ConfigMixin, FromOriginalMo
`torch.Tensor`:
The latent representation of the encoded videos.
"""
- _, _, num_frames, height, width = x.shape
- latent_height = height // self.spatial_compression_ratio
- latent_width = width // self.spatial_compression_ratio
- tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
- tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
- tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
- tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
+ _, _, num_frames, height, width = x.shape
+ encode_spatial_compression_ratio = self.spatial_compression_ratio
+ if self.config.patch_size is not None:
+ assert encode_spatial_compression_ratio % self.config.patch_size == 0
+ encode_spatial_compression_ratio = self.spatial_compression_ratio // self.config.patch_size
+
+ latent_height = height // encode_spatial_compression_ratio
+ latent_width = width // encode_spatial_compression_ratio
+
+ tile_latent_min_height = self.tile_sample_min_height // encode_spatial_compression_ratio
+ tile_latent_min_width = self.tile_sample_min_width // encode_spatial_compression_ratio
+ tile_latent_stride_height = self.tile_sample_stride_height // encode_spatial_compression_ratio
+ tile_latent_stride_width = self.tile_sample_stride_width // encode_spatial_compression_ratio
blend_height = tile_latent_min_height - tile_latent_stride_height
blend_width = tile_latent_min_width - tile_latent_stride_width
@@ -1408,6 +1416,7 @@ class AutoencoderKLWan(ModelMixin, AutoencoderMixin, ConfigMixin, FromOriginalMo
"""
x = sample
posterior = self.encode(x).latent_dist
+
if sample_posterior:
z = posterior.sample(generator=generator)
else:
diff --git a/src/diffusers/models/cache_utils.py b/src/diffusers/models/cache_utils.py
index 605c0d588c..f4ad1af278 100644
--- a/src/diffusers/models/cache_utils.py
+++ b/src/diffusers/models/cache_utils.py
@@ -67,9 +67,11 @@ class CacheMixin:
FasterCacheConfig,
FirstBlockCacheConfig,
PyramidAttentionBroadcastConfig,
+ TaylorSeerCacheConfig,
apply_faster_cache,
apply_first_block_cache,
apply_pyramid_attention_broadcast,
+ apply_taylorseer_cache,
)
if self.is_cache_enabled:
@@ -83,16 +85,25 @@ class CacheMixin:
apply_first_block_cache(self, config)
elif isinstance(config, PyramidAttentionBroadcastConfig):
apply_pyramid_attention_broadcast(self, config)
+ elif isinstance(config, TaylorSeerCacheConfig):
+ apply_taylorseer_cache(self, config)
else:
raise ValueError(f"Cache config {type(config)} is not supported.")
self._cache_config = config
def disable_cache(self) -> None:
- from ..hooks import FasterCacheConfig, FirstBlockCacheConfig, HookRegistry, PyramidAttentionBroadcastConfig
+ from ..hooks import (
+ FasterCacheConfig,
+ FirstBlockCacheConfig,
+ HookRegistry,
+ PyramidAttentionBroadcastConfig,
+ TaylorSeerCacheConfig,
+ )
from ..hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK
from ..hooks.first_block_cache import _FBC_BLOCK_HOOK, _FBC_LEADER_BLOCK_HOOK
from ..hooks.pyramid_attention_broadcast import _PYRAMID_ATTENTION_BROADCAST_HOOK
+ from ..hooks.taylorseer_cache import _TAYLORSEER_CACHE_HOOK
if self._cache_config is None:
logger.warning("Caching techniques have not been enabled, so there's nothing to disable.")
@@ -107,6 +118,8 @@ class CacheMixin:
registry.remove_hook(_FBC_BLOCK_HOOK, recurse=True)
elif isinstance(self._cache_config, PyramidAttentionBroadcastConfig):
registry.remove_hook(_PYRAMID_ATTENTION_BROADCAST_HOOK, recurse=True)
+ elif isinstance(self._cache_config, TaylorSeerCacheConfig):
+ registry.remove_hook(_TAYLORSEER_CACHE_HOOK, recurse=True)
else:
raise ValueError(f"Cache config {type(self._cache_config)} is not supported.")
diff --git a/src/diffusers/models/modeling_utils.py b/src/diffusers/models/modeling_utils.py
index f06822c741..41da95d3a2 100644
--- a/src/diffusers/models/modeling_utils.py
+++ b/src/diffusers/models/modeling_utils.py
@@ -531,6 +531,8 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
record_stream: bool = False,
low_cpu_mem_usage=False,
offload_to_disk_path: Optional[str] = None,
+ block_modules: Optional[List[str]] = None,
+ exclude_kwargs: Optional[List[str]] = None,
) -> None:
r"""
Activates group offloading for the current model.
@@ -570,6 +572,7 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
f"`_supports_group_offloading` to `True` in the class definition. If you believe this is a mistake, please "
f"open an issue at https://github.com/huggingface/diffusers/issues."
)
+
apply_group_offloading(
module=self,
onload_device=onload_device,
@@ -581,6 +584,8 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
record_stream=record_stream,
low_cpu_mem_usage=low_cpu_mem_usage,
offload_to_disk_path=offload_to_disk_path,
+ block_modules=block_modules,
+ exclude_kwargs=exclude_kwargs,
)
def set_attention_backend(self, backend: str) -> None:
diff --git a/src/diffusers/models/transformers/transformer_hunyuan_video15.py b/src/diffusers/models/transformers/transformer_hunyuan_video15.py
index 76a02cb1a8..293ba996ea 100644
--- a/src/diffusers/models/transformers/transformer_hunyuan_video15.py
+++ b/src/diffusers/models/transformers/transformer_hunyuan_video15.py
@@ -184,19 +184,32 @@ class HunyuanVideo15TimeEmbedding(nn.Module):
The dimension of the output embedding.
"""
- def __init__(self, embedding_dim: int):
+ def __init__(self, embedding_dim: int, use_meanflow: bool = False):
super().__init__()
self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
+ self.use_meanflow = use_meanflow
+ self.time_proj_r = None
+ self.timestep_embedder_r = None
+ if use_meanflow:
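+ # Step-distilled (MeanFlow-style) checkpoints take a second timestep, `timestep_r`, whose
+ # embedding is added to the main timestep embedding in `forward`.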
+ self.time_proj_r = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
+ self.timestep_embedder_r = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
+
def forward(
self,
timestep: torch.Tensor,
+ timestep_r: Optional[torch.Tensor] = None,
) -> torch.Tensor:
timesteps_proj = self.time_proj(timestep)
timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=timestep.dtype))
+ if timestep_r is not None:
+ timesteps_proj_r = self.time_proj_r(timestep_r)
+ timesteps_emb_r = self.timestep_embedder_r(timesteps_proj_r.to(dtype=timestep.dtype))
+ timesteps_emb = timesteps_emb + timesteps_emb_r
+
return timesteps_emb
@@ -567,6 +580,7 @@ class HunyuanVideo15Transformer3DModel(
# YiYi Notes: config based on target_size_config https://github.com/yiyixuxu/hy15/blob/main/hyvideo/pipelines/hunyuan_video_pipeline.py#L205
target_size: int = 640, # did not name sample_size since it is in pixel spaces
task_type: str = "i2v",
+ use_meanflow: bool = False,
) -> None:
super().__init__()
@@ -582,7 +596,7 @@ class HunyuanVideo15Transformer3DModel(
)
self.context_embedder_2 = HunyuanVideo15ByT5TextProjection(text_embed_2_dim, 2048, inner_dim)
- self.time_embed = HunyuanVideo15TimeEmbedding(inner_dim)
+ self.time_embed = HunyuanVideo15TimeEmbedding(inner_dim, use_meanflow=use_meanflow)
self.cond_type_embed = nn.Embedding(3, inner_dim)
@@ -612,6 +626,7 @@ class HunyuanVideo15Transformer3DModel(
timestep: torch.LongTensor,
encoder_hidden_states: torch.Tensor,
encoder_attention_mask: torch.Tensor,
+ timestep_r: Optional[torch.LongTensor] = None,
encoder_hidden_states_2: Optional[torch.Tensor] = None,
encoder_attention_mask_2: Optional[torch.Tensor] = None,
image_embeds: Optional[torch.Tensor] = None,
@@ -643,7 +658,7 @@ class HunyuanVideo15Transformer3DModel(
image_rotary_emb = self.rope(hidden_states)
# 2. Conditional embeddings
- temb = self.time_embed(timestep)
+ temb = self.time_embed(timestep, timestep_r=timestep_r)
hidden_states = self.x_embedder(hidden_states)
diff --git a/src/diffusers/models/transformers/transformer_prx.py b/src/diffusers/models/transformers/transformer_prx.py
index ccbc83ffca..a87c120fdc 100644
--- a/src/diffusers/models/transformers/transformer_prx.py
+++ b/src/diffusers/models/transformers/transformer_prx.py
@@ -16,7 +16,6 @@ from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
-from torch.nn.functional import fold, unfold
from ...configuration_utils import ConfigMixin, register_to_config
from ...utils import logging
@@ -532,7 +531,19 @@ def img2seq(img: torch.Tensor, patch_size: int) -> torch.Tensor:
Flattened patch sequence of shape `(B, L, C * patch_size * patch_size)`, where `L = (H // patch_size) * (W
// patch_size)` is the number of patches.
"""
- return unfold(img, kernel_size=patch_size, stride=patch_size).transpose(1, 2)
+ b, c, h, w = img.shape
+ p = patch_size
+
+ # Reshape to (B, C, H//p, p, W//p, p) separating grid and patch dimensions
+ img = img.reshape(b, c, h // p, p, w // p, p)
+
+ # Permute to (B, H//p, W//p, C, p, p) using einsum
+ # n=batch, c=channels, h=grid_height, p=patch_height, w=grid_width, q=patch_width
+ img = torch.einsum("nchpwq->nhwcpq", img)
+
+ # Flatten to (B, L, C * p * p)
+ img = img.reshape(b, -1, c * p * p)
+ return img
def seq2img(seq: torch.Tensor, patch_size: int, shape: torch.Tensor) -> torch.Tensor:
@@ -554,12 +565,26 @@ def seq2img(seq: torch.Tensor, patch_size: int, shape: torch.Tensor) -> torch.Te
Reconstructed image tensor of shape `(B, C, H, W)`.
"""
if isinstance(shape, tuple):
- shape = shape[-2:]
+ h, w = shape[-2:]
elif isinstance(shape, torch.Tensor):
- shape = (int(shape[0]), int(shape[1]))
+ h, w = (int(shape[0]), int(shape[1]))
else:
raise NotImplementedError(f"shape type {type(shape)} not supported")
- return fold(seq.transpose(1, 2), shape, kernel_size=patch_size, stride=patch_size)
+
+ b, l, d = seq.shape
+ p = patch_size
+ c = d // (p * p)
+
+ # Reshape back to grid structure: (B, H//p, W//p, C, p, p)
+ seq = seq.reshape(b, h // p, w // p, c, p, p)
+
+ # Permute back to image layout: (B, C, H//p, p, W//p, p)
+ # n=batch, h=grid_height, w=grid_width, c=channels, p=patch_height, q=patch_width
+ seq = torch.einsum("nhwcpq->nchpwq", seq)
+
+ # Final reshape to (B, C, H, W)
+ seq = seq.reshape(b, c, h, w)
+ return seq
class PRXTransformer2DModel(ModelMixin, ConfigMixin, AttentionMixin):
@@ -694,6 +719,7 @@ class PRXTransformer2DModel(ModelMixin, ConfigMixin, AttentionMixin):
max_period=self.time_max_period,
scale=self.time_factor,
flip_sin_to_cos=True, # Match original cos, sin order
+ downscale_freq_shift=0.0,
).to(dtype)
)
diff --git a/src/diffusers/models/transformers/transformer_z_image.py b/src/diffusers/models/transformers/transformer_z_image.py
index 1459e5974e..5c401b9d20 100644
--- a/src/diffusers/models/transformers/transformer_z_image.py
+++ b/src/diffusers/models/transformers/transformer_z_image.py
@@ -63,8 +63,11 @@ class TimestepEmbedder(nn.Module):
def forward(self, t):
t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
weight_dtype = self.mlp[0].weight.dtype
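+ # Quantized linear layers (e.g. bitsandbytes 4-bit) may expose a non-floating-point weight dtype; fall back to their compute_dtype when available.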
+ compute_dtype = getattr(self.mlp[0], "compute_dtype", None)
if weight_dtype.is_floating_point:
t_freq = t_freq.to(weight_dtype)
+ elif compute_dtype is not None:
+ t_freq = t_freq.to(compute_dtype)
t_emb = self.mlp(t_freq)
return t_emb
diff --git a/src/diffusers/modular_pipelines/__init__.py b/src/diffusers/modular_pipelines/__init__.py
index 252b9f33df..dea9da0269 100644
--- a/src/diffusers/modular_pipelines/__init__.py
+++ b/src/diffusers/modular_pipelines/__init__.py
@@ -60,6 +60,10 @@ else:
"QwenImageEditPlusModularPipeline",
"QwenImageEditPlusAutoBlocks",
]
+ _import_structure["z_image"] = [
+ "ZImageAutoBlocks",
+ "ZImageModularPipeline",
+ ]
_import_structure["components_manager"] = ["ComponentsManager"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
@@ -91,6 +95,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
)
from .stable_diffusion_xl import StableDiffusionXLAutoBlocks, StableDiffusionXLModularPipeline
from .wan import Wan22AutoBlocks, WanAutoBlocks, WanModularPipeline
+ from .z_image import ZImageAutoBlocks, ZImageModularPipeline
else:
import sys
diff --git a/src/diffusers/modular_pipelines/modular_pipeline.py b/src/diffusers/modular_pipelines/modular_pipeline.py
index a6336de71a..bba89e6121 100644
--- a/src/diffusers/modular_pipelines/modular_pipeline.py
+++ b/src/diffusers/modular_pipelines/modular_pipeline.py
@@ -61,6 +61,7 @@ MODULAR_PIPELINE_MAPPING = OrderedDict(
("qwenimage", "QwenImageModularPipeline"),
("qwenimage-edit", "QwenImageEditModularPipeline"),
("qwenimage-edit-plus", "QwenImageEditPlusModularPipeline"),
+ ("z-image", "ZImageModularPipeline"),
]
)
diff --git a/src/diffusers/modular_pipelines/qwenimage/before_denoise.py b/src/diffusers/modular_pipelines/qwenimage/before_denoise.py
index 0e470332c6..bd92d40353 100644
--- a/src/diffusers/modular_pipelines/qwenimage/before_denoise.py
+++ b/src/diffusers/modular_pipelines/qwenimage/before_denoise.py
@@ -610,7 +610,6 @@ class QwenImageEditRoPEInputsStep(ModularPipelineBlocks):
block_state = self.get_block_state(state)
# for edit, image size can be different from the target size (height/width)
-
block_state.img_shapes = [
[
(
@@ -640,6 +639,37 @@ class QwenImageEditRoPEInputsStep(ModularPipelineBlocks):
return components, state
+class QwenImageEditPlusRoPEInputsStep(QwenImageEditRoPEInputsStep):
+ model_name = "qwenimage-edit-plus"
+
+ def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+
+ vae_scale_factor = components.vae_scale_factor
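+ # img_shapes: for each sample, the target latent grid first, followed by one grid per reference image (in packed 2x2-latent units).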
+ block_state.img_shapes = [
+ [
+ (1, block_state.height // vae_scale_factor // 2, block_state.width // vae_scale_factor // 2),
+ *[
+ (1, vae_height // vae_scale_factor // 2, vae_width // vae_scale_factor // 2)
+ for vae_height, vae_width in zip(block_state.image_height, block_state.image_width)
+ ],
+ ]
+ ] * block_state.batch_size
+
+ block_state.txt_seq_lens = (
+ block_state.prompt_embeds_mask.sum(dim=1).tolist() if block_state.prompt_embeds_mask is not None else None
+ )
+ block_state.negative_txt_seq_lens = (
+ block_state.negative_prompt_embeds_mask.sum(dim=1).tolist()
+ if block_state.negative_prompt_embeds_mask is not None
+ else None
+ )
+
+ self.set_block_state(state, block_state)
+
+ return components, state
+
+
## ControlNet inputs for denoiser
class QwenImageControlNetBeforeDenoiserStep(ModularPipelineBlocks):
model_name = "qwenimage"
diff --git a/src/diffusers/modular_pipelines/qwenimage/encoders.py b/src/diffusers/modular_pipelines/qwenimage/encoders.py
index 3b56981e52..b126a368bf 100644
--- a/src/diffusers/modular_pipelines/qwenimage/encoders.py
+++ b/src/diffusers/modular_pipelines/qwenimage/encoders.py
@@ -330,7 +330,7 @@ class QwenImageEditPlusResizeDynamicStep(QwenImageEditResizeDynamicStep):
output_name: str = "resized_image",
vae_image_output_name: str = "vae_image",
):
- """Create a configurable step for resizing images to the target area (1024 * 1024) while maintaining the aspect ratio.
+ """Create a configurable step for resizing images to the target area (384 * 384) while maintaining the aspect ratio.
This block resizes an input image or a list input images and exposes the resized result under configurable
input and output names. Use this when you need to wire the resize step to different image fields (e.g.,
@@ -809,9 +809,7 @@ class QwenImageProcessImagesInputStep(ModularPipelineBlocks):
@property
def intermediate_outputs(self) -> List[OutputParam]:
- return [
- OutputParam(name="processed_image"),
- ]
+ return [OutputParam(name="processed_image")]
@staticmethod
def check_inputs(height, width, vae_scale_factor):
@@ -851,7 +849,10 @@ class QwenImageProcessImagesInputStep(ModularPipelineBlocks):
class QwenImageEditPlusProcessImagesInputStep(QwenImageProcessImagesInputStep):
model_name = "qwenimage-edit-plus"
- vae_image_size = 1024 * 1024
+
+ def __init__(self):
+ self.vae_image_size = 1024 * 1024
+ super().__init__()
@property
def description(self) -> str:
@@ -868,6 +869,7 @@ class QwenImageEditPlusProcessImagesInputStep(QwenImageProcessImagesInputStep):
if block_state.vae_image is None and block_state.image is None:
raise ValueError("`vae_image` and `image` cannot be None at the same time")
+ vae_image_sizes = None
if block_state.vae_image is None:
image = block_state.image
self.check_inputs(
@@ -879,12 +881,19 @@ class QwenImageEditPlusProcessImagesInputStep(QwenImageProcessImagesInputStep):
image=image, height=height, width=width
)
else:
- width, height = block_state.vae_image[0].size
- image = block_state.vae_image
+ # QwenImage Edit Plus allows multiple input images with varied resolutions
+ processed_images = []
+ vae_image_sizes = []
+ for img in block_state.vae_image:
+ width, height = img.size
+ vae_width, vae_height, _ = calculate_dimensions(self.vae_image_size, width / height)
+ vae_image_sizes.append((vae_width, vae_height))
+ processed_images.append(
+ components.image_processor.preprocess(image=img, height=vae_height, width=vae_width)
+ )
+ block_state.processed_image = processed_images
- block_state.processed_image = components.image_processor.preprocess(
- image=image, height=height, width=width
- )
+ block_state.vae_image_sizes = vae_image_sizes
self.set_block_state(state, block_state)
return components, state
@@ -926,17 +935,12 @@ class QwenImageVaeEncoderDynamicStep(ModularPipelineBlocks):
@property
def expected_components(self) -> List[ComponentSpec]:
- components = [
- ComponentSpec("vae", AutoencoderKLQwenImage),
- ]
+ components = [ComponentSpec("vae", AutoencoderKLQwenImage)]
return components
@property
def inputs(self) -> List[InputParam]:
- inputs = [
- InputParam(self._image_input_name, required=True),
- InputParam("generator"),
- ]
+ inputs = [InputParam(self._image_input_name, required=True), InputParam("generator")]
return inputs
@property
@@ -974,6 +978,50 @@ class QwenImageVaeEncoderDynamicStep(ModularPipelineBlocks):
return components, state
+class QwenImageEditPlusVaeEncoderDynamicStep(QwenImageVaeEncoderDynamicStep):
+ model_name = "qwenimage-edit-plus"
+
+ @property
+ def intermediate_outputs(self) -> List[OutputParam]:
+ # Each reference image latent can have a different resolution, hence we return a list.
+ return [
+ OutputParam(
+ self._image_latents_output_name,
+ type_hint=List[torch.Tensor],
+ description="The latents representing the reference image(s).",
+ )
+ ]
+
+ @torch.no_grad()
+ def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+
+ device = components._execution_device
+ dtype = components.vae.dtype
+
+ image = getattr(block_state, self._image_input_name)
+
+ # Encode image into latents
+ image_latents = []
+ for img in image:
+ image_latents.append(
+ encode_vae_image(
+ image=img,
+ vae=components.vae,
+ generator=block_state.generator,
+ device=device,
+ dtype=dtype,
+ latent_channels=components.num_channels_latents,
+ )
+ )
+
+ setattr(block_state, self._image_latents_output_name, image_latents)
+
+ self.set_block_state(state, block_state)
+
+ return components, state
+
+
class QwenImageControlNetVaeEncoderStep(ModularPipelineBlocks):
model_name = "qwenimage"
diff --git a/src/diffusers/modular_pipelines/qwenimage/inputs.py b/src/diffusers/modular_pipelines/qwenimage/inputs.py
index 2b229c040b..6e656e4848 100644
--- a/src/diffusers/modular_pipelines/qwenimage/inputs.py
+++ b/src/diffusers/modular_pipelines/qwenimage/inputs.py
@@ -224,11 +224,7 @@ class QwenImageTextInputsStep(ModularPipelineBlocks):
class QwenImageInputsDynamicStep(ModularPipelineBlocks):
model_name = "qwenimage"
- def __init__(
- self,
- image_latent_inputs: List[str] = ["image_latents"],
- additional_batch_inputs: List[str] = [],
- ):
+ def __init__(self, image_latent_inputs: List[str] = ["image_latents"], additional_batch_inputs: List[str] = []):
"""Initialize a configurable step that standardizes the inputs for the denoising step. It:\n"
This step handles multiple common tasks to prepare inputs for the denoising step:
@@ -372,6 +368,76 @@ class QwenImageInputsDynamicStep(ModularPipelineBlocks):
return components, state
+class QwenImageEditPlusInputsDynamicStep(QwenImageInputsDynamicStep):
+ model_name = "qwenimage-edit-plus"
+
+ @property
+ def intermediate_outputs(self) -> List[OutputParam]:
+ return [
+ OutputParam(name="image_height", type_hint=List[int], description="The height of the image latents"),
+ OutputParam(name="image_width", type_hint=List[int], description="The width of the image latents"),
+ ]
+
+ def __call__(self, components: QwenImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+
+ # Process image latent inputs (height/width calculation, patchify, and batch expansion)
+ for image_latent_input_name in self._image_latent_inputs:
+ image_latent_tensor = getattr(block_state, image_latent_input_name)
+ if image_latent_tensor is None:
+ continue
+
+ # Each image latent can have a different size in QwenImage Edit Plus.
+ image_heights = []
+ image_widths = []
+ packed_image_latent_tensors = []
+
+ for img_latent_tensor in image_latent_tensor:
+ # 1. Calculate height/width from latents
+ height, width = calculate_dimension_from_latents(img_latent_tensor, components.vae_scale_factor)
+ image_heights.append(height)
+ image_widths.append(width)
+
+ # 2. Patchify the image latent tensor
+ img_latent_tensor = components.pachifier.pack_latents(img_latent_tensor)
+
+ # 3. Expand batch size
+ img_latent_tensor = repeat_tensor_to_batch_size(
+ input_name=image_latent_input_name,
+ input_tensor=img_latent_tensor,
+ num_images_per_prompt=block_state.num_images_per_prompt,
+ batch_size=block_state.batch_size,
+ )
+ packed_image_latent_tensors.append(img_latent_tensor)
+
+ packed_image_latent_tensors = torch.cat(packed_image_latent_tensors, dim=1)
+ block_state.image_height = image_heights
+ block_state.image_width = image_widths
+ setattr(block_state, image_latent_input_name, packed_image_latent_tensors)
+
+ block_state.height = block_state.height or image_heights[-1]
+ block_state.width = block_state.width or image_widths[-1]
+
+ # Process additional batch inputs (only batch expansion)
+ for input_name in self._additional_batch_inputs:
+ input_tensor = getattr(block_state, input_name)
+ if input_tensor is None:
+ continue
+
+ # Only expand batch size
+ input_tensor = repeat_tensor_to_batch_size(
+ input_name=input_name,
+ input_tensor=input_tensor,
+ num_images_per_prompt=block_state.num_images_per_prompt,
+ batch_size=block_state.batch_size,
+ )
+
+ setattr(block_state, input_name, input_tensor)
+
+ self.set_block_state(state, block_state)
+ return components, state
+
+
class QwenImageControlNetInputsStep(ModularPipelineBlocks):
model_name = "qwenimage"
diff --git a/src/diffusers/modular_pipelines/qwenimage/modular_blocks.py b/src/diffusers/modular_pipelines/qwenimage/modular_blocks.py
index 4198941643..55a7ae328f 100644
--- a/src/diffusers/modular_pipelines/qwenimage/modular_blocks.py
+++ b/src/diffusers/modular_pipelines/qwenimage/modular_blocks.py
@@ -18,6 +18,7 @@ from ..modular_pipeline_utils import InsertableDict
from .before_denoise import (
QwenImageControlNetBeforeDenoiserStep,
QwenImageCreateMaskLatentsStep,
+ QwenImageEditPlusRoPEInputsStep,
QwenImageEditRoPEInputsStep,
QwenImagePrepareLatentsStep,
QwenImagePrepareLatentsWithStrengthStep,
@@ -40,6 +41,7 @@ from .encoders import (
QwenImageEditPlusProcessImagesInputStep,
QwenImageEditPlusResizeDynamicStep,
QwenImageEditPlusTextEncoderStep,
+ QwenImageEditPlusVaeEncoderDynamicStep,
QwenImageEditResizeDynamicStep,
QwenImageEditTextEncoderStep,
QwenImageInpaintProcessImagesInputStep,
@@ -47,7 +49,12 @@ from .encoders import (
QwenImageTextEncoderStep,
QwenImageVaeEncoderDynamicStep,
)
-from .inputs import QwenImageControlNetInputsStep, QwenImageInputsDynamicStep, QwenImageTextInputsStep
+from .inputs import (
+ QwenImageControlNetInputsStep,
+ QwenImageEditPlusInputsDynamicStep,
+ QwenImageInputsDynamicStep,
+ QwenImageTextInputsStep,
+)
logger = logging.get_logger(__name__)
@@ -904,13 +911,13 @@ QwenImageEditPlusVaeEncoderBlocks = InsertableDict(
[
("resize", QwenImageEditPlusResizeDynamicStep()), # edit plus has a different resize step
("preprocess", QwenImageEditPlusProcessImagesInputStep()), # vae_image -> processed_image
- ("encode", QwenImageVaeEncoderDynamicStep()), # processed_image -> image_latents
+ ("encode", QwenImageEditPlusVaeEncoderDynamicStep()), # processed_image -> image_latents
]
)
class QwenImageEditPlusVaeEncoderStep(SequentialPipelineBlocks):
- model_name = "qwenimage"
+ model_name = "qwenimage-edit-plus"
block_classes = QwenImageEditPlusVaeEncoderBlocks.values()
block_names = QwenImageEditPlusVaeEncoderBlocks.keys()
@@ -919,25 +926,62 @@ class QwenImageEditPlusVaeEncoderStep(SequentialPipelineBlocks):
return "Vae encoder step that encode the image inputs into their latent representations."
+#### QwenImage Edit Plus input blocks
+QwenImageEditPlusInputBlocks = InsertableDict(
+ [
+ ("text_inputs", QwenImageTextInputsStep()), # default step to process text embeddings
+ (
+ "additional_inputs",
+ QwenImageEditPlusInputsDynamicStep(image_latent_inputs=["image_latents"]),
+ ),
+ ]
+)
+
+
+class QwenImageEditPlusInputStep(SequentialPipelineBlocks):
+ model_name = "qwenimage-edit-plus"
+ block_classes = QwenImageEditPlusInputBlocks.values()
+ block_names = QwenImageEditPlusInputBlocks.keys()
+
+
#### QwenImage Edit Plus presets
EDIT_PLUS_BLOCKS = InsertableDict(
[
("text_encoder", QwenImageEditPlusVLEncoderStep()),
("vae_encoder", QwenImageEditPlusVaeEncoderStep()),
- ("input", QwenImageEditInputStep()),
+ ("input", QwenImageEditPlusInputStep()),
("prepare_latents", QwenImagePrepareLatentsStep()),
("set_timesteps", QwenImageSetTimestepsStep()),
- ("prepare_rope_inputs", QwenImageEditRoPEInputsStep()),
+ ("prepare_rope_inputs", QwenImageEditPlusRoPEInputsStep()),
("denoise", QwenImageEditDenoiseStep()),
("decode", QwenImageDecodeStep()),
]
)
+QwenImageEditPlusBeforeDenoiseBlocks = InsertableDict(
+ [
+ ("prepare_latents", QwenImagePrepareLatentsStep()),
+ ("set_timesteps", QwenImageSetTimestepsStep()),
+ ("prepare_rope_inputs", QwenImageEditPlusRoPEInputsStep()),
+ ]
+)
+
+
+class QwenImageEditPlusBeforeDenoiseStep(SequentialPipelineBlocks):
+ model_name = "qwenimage-edit-plus"
+ block_classes = QwenImageEditPlusBeforeDenoiseBlocks.values()
+ block_names = QwenImageEditPlusBeforeDenoiseBlocks.keys()
+
+ @property
+ def description(self):
+ return "Before denoise step that prepares the inputs (timesteps, latents, rope inputs etc.) for the denoise step for the edit task."
+
+
# auto before_denoise step for edit tasks
class QwenImageEditPlusAutoBeforeDenoiseStep(AutoPipelineBlocks):
model_name = "qwenimage-edit-plus"
- block_classes = [QwenImageEditBeforeDenoiseStep]
+ block_classes = [QwenImageEditPlusBeforeDenoiseStep]
block_names = ["edit"]
block_trigger_inputs = ["image_latents"]
@@ -946,7 +990,7 @@ class QwenImageEditPlusAutoBeforeDenoiseStep(AutoPipelineBlocks):
return (
"Before denoise step that prepare the inputs (timesteps, latents, rope inputs etc.) for the denoise step.\n"
+ "This is an auto pipeline block that works for edit (img2img) task.\n"
- + " - `QwenImageEditBeforeDenoiseStep` (edit) is used when `image_latents` is provided and `processed_mask_image` is not provided.\n"
+ + " - `QwenImageEditPlusBeforeDenoiseStep` (edit) is used when `image_latents` is provided and `processed_mask_image` is not provided.\n"
+ " - if `image_latents` is not provided, step will be skipped."
)
@@ -955,9 +999,7 @@ class QwenImageEditPlusAutoBeforeDenoiseStep(AutoPipelineBlocks):
class QwenImageEditPlusAutoVaeEncoderStep(AutoPipelineBlocks):
- block_classes = [
- QwenImageEditPlusVaeEncoderStep,
- ]
+ block_classes = [QwenImageEditPlusVaeEncoderStep]
block_names = ["edit"]
block_trigger_inputs = ["image"]
@@ -974,10 +1016,25 @@ class QwenImageEditPlusAutoVaeEncoderStep(AutoPipelineBlocks):
## 3.3 QwenImage-Edit/auto blocks & presets
+class QwenImageEditPlusAutoInputStep(AutoPipelineBlocks):
+ block_classes = [QwenImageEditPlusInputStep]
+ block_names = ["edit"]
+ block_trigger_inputs = ["image_latents"]
+
+ @property
+ def description(self):
+ return (
+ "Input step that prepares the inputs for the edit denoising step.\n"
+ + " It is an auto pipeline block that works for edit task.\n"
+ + " - `QwenImageEditPlusInputStep` (edit) is used when `image_latents` is provided.\n"
+ + " - if `image_latents` is not provided, step will be skipped."
+ )
+
+
class QwenImageEditPlusCoreDenoiseStep(SequentialPipelineBlocks):
model_name = "qwenimage-edit-plus"
block_classes = [
- QwenImageEditAutoInputStep,
+ QwenImageEditPlusAutoInputStep,
QwenImageEditPlusAutoBeforeDenoiseStep,
QwenImageEditAutoDenoiseStep,
]
diff --git a/src/diffusers/modular_pipelines/wan/encoders.py b/src/diffusers/modular_pipelines/wan/encoders.py
index dc49df8eab..4fd69c6ca6 100644
--- a/src/diffusers/modular_pipelines/wan/encoders.py
+++ b/src/diffusers/modular_pipelines/wan/encoders.py
@@ -530,6 +530,7 @@ class WanVaeImageEncoderStep(ModularPipelineBlocks):
device = components._execution_device
dtype = torch.float32
+ vae_dtype = components.vae.dtype
height = block_state.height or components.default_height
width = block_state.width or components.default_width
@@ -555,7 +556,7 @@ class WanVaeImageEncoderStep(ModularPipelineBlocks):
vae=components.vae,
generator=block_state.generator,
device=device,
- dtype=dtype,
+ dtype=vae_dtype,
latent_channels=components.num_channels_latents,
)
@@ -627,6 +628,7 @@ class WanFirstLastFrameVaeImageEncoderStep(ModularPipelineBlocks):
device = components._execution_device
dtype = torch.float32
+ vae_dtype = components.vae.dtype
height = block_state.height or components.default_height
width = block_state.width or components.default_width
@@ -659,7 +661,7 @@ class WanFirstLastFrameVaeImageEncoderStep(ModularPipelineBlocks):
vae=components.vae,
generator=block_state.generator,
device=device,
- dtype=dtype,
+ dtype=vae_dtype,
latent_channels=components.num_channels_latents,
)
diff --git a/src/diffusers/modular_pipelines/z_image/__init__.py b/src/diffusers/modular_pipelines/z_image/__init__.py
new file mode 100644
index 0000000000..c8a8c14396
--- /dev/null
+++ b/src/diffusers/modular_pipelines/z_image/__init__.py
@@ -0,0 +1,57 @@
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ DIFFUSERS_SLOW_IMPORT,
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ get_objects_from_module,
+ is_torch_available,
+ is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+try:
+ if not (is_transformers_available() and is_torch_available()):
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
+
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+ _import_structure["decoders"] = ["ZImageVaeDecoderStep"]
+ _import_structure["encoders"] = ["ZImageTextEncoderStep", "ZImageVaeImageEncoderStep"]
+ _import_structure["modular_blocks"] = [
+ "ALL_BLOCKS",
+ "ZImageAutoBlocks",
+ ]
+ _import_structure["modular_pipeline"] = ["ZImageModularPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+ try:
+ if not (is_transformers_available() and is_torch_available()):
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
+ else:
+ from .decoders import ZImageVaeDecoderStep
+ from .encoders import ZImageTextEncoderStep, ZImageVaeImageEncoderStep
+ from .modular_blocks import (
+ ALL_BLOCKS,
+ ZImageAutoBlocks,
+ )
+ from .modular_pipeline import ZImageModularPipeline
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(
+ __name__,
+ globals()["__file__"],
+ _import_structure,
+ module_spec=__spec__,
+ )
+
+ for name, value in _dummy_objects.items():
+ setattr(sys.modules[__name__], name, value)
diff --git a/src/diffusers/modular_pipelines/z_image/before_denoise.py b/src/diffusers/modular_pipelines/z_image/before_denoise.py
new file mode 100644
index 0000000000..35ea768f12
--- /dev/null
+++ b/src/diffusers/modular_pipelines/z_image/before_denoise.py
@@ -0,0 +1,621 @@
+# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import List, Optional, Tuple, Union
+
+import torch
+
+from ...models import ZImageTransformer2DModel
+from ...schedulers import FlowMatchEulerDiscreteScheduler
+from ...utils import logging
+from ...utils.torch_utils import randn_tensor
+from ..modular_pipeline import ModularPipelineBlocks, PipelineState
+from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam
+from .modular_pipeline import ZImageModularPipeline
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+# TODO(yiyi, aryan): We need another step before text encoder to set the `num_inference_steps` attribute for guider so that
+# things like when to do guidance and how many conditions to be prepared can be determined. Currently, this is done by
+# always assuming you want to do guidance in the Guiders. So, negative embeddings are prepared regardless of what the
+# configuration of guider is.
+
+
+def repeat_tensor_to_batch_size(
+ input_name: str,
+ input_tensor: torch.Tensor,
+ batch_size: int,
+ num_images_per_prompt: int = 1,
+) -> torch.Tensor:
+ """Repeat tensor elements to match the final batch size.
+
+ This function expands a tensor's batch dimension to match the final batch size (batch_size * num_images_per_prompt)
+ by repeating each element along dimension 0.
+
+ The input tensor must have batch size 1 or batch_size. The function will:
+ - If batch size is 1: repeat each element (batch_size * num_images_per_prompt) times
+ - If batch size equals batch_size: repeat each element num_images_per_prompt times
+
+ Args:
+ input_name (str): Name of the input tensor (used for error messages)
+ input_tensor (torch.Tensor): The tensor to repeat. Must have batch size 1 or batch_size.
+ batch_size (int): The base batch size (number of prompts)
+ num_images_per_prompt (int, optional): Number of images to generate per prompt. Defaults to 1.
+
+ Returns:
+ torch.Tensor: The repeated tensor with final batch size (batch_size * num_images_per_prompt)
+
+ Raises:
+ ValueError: If input_tensor is not a torch.Tensor or has invalid batch size
+
+ Examples:
+ tensor = torch.tensor([[1, 2, 3]])  # shape: [1, 3]
+ repeated = repeat_tensor_to_batch_size("image", tensor, batch_size=2, num_images_per_prompt=2)
+ repeated  # tensor([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) - shape: [4, 3]
+
+ tensor = torch.tensor([[1, 2, 3], [4, 5, 6]])  # shape: [2, 3]
+ repeated = repeat_tensor_to_batch_size("image", tensor, batch_size=2, num_images_per_prompt=2)
+ repeated  # tensor([[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) - shape: [4, 3]
+ """
+ # make sure input is a tensor
+ if not isinstance(input_tensor, torch.Tensor):
+ raise ValueError(f"`{input_name}` must be a tensor")
+
+ # make sure input tensor e.g. image_latents has batch size 1 or batch_size same as prompts
+ if input_tensor.shape[0] == 1:
+ repeat_by = batch_size * num_images_per_prompt
+ elif input_tensor.shape[0] == batch_size:
+ repeat_by = num_images_per_prompt
+ else:
+ raise ValueError(
+ f"`{input_name}` must have batch size 1 or {batch_size}, but got {input_tensor.shape[0]}"
+ )
+
+ # expand the tensor to match the batch_size * num_images_per_prompt
+ input_tensor = input_tensor.repeat_interleave(repeat_by, dim=0)
+
+ return input_tensor
+
+
+def calculate_dimension_from_latents(latents: torch.Tensor, vae_scale_factor_spatial: int) -> Tuple[int, int]:
+ """Calculate image dimensions from latent tensor dimensions.
+
+ This function converts latent spatial dimensions to image spatial dimensions by multiplying the latent height/width
+ by the VAE scale factor.
+
+ Args:
+ latents (torch.Tensor): The latent tensor. Must have 4 dimensions.
+ Expected shapes: [batch, channels, height, width]
+ vae_scale_factor_spatial (int): The scale factor used by the VAE to compress the image's spatial dimensions.
+ Defaults to 16.
+ Returns:
+ Tuple[int, int]: The calculated image dimensions as (height, width)
+ """
+ latent_height, latent_width = latents.shape[2:]
+ height = latent_height * vae_scale_factor_spatial // 2
+ width = latent_width * vae_scale_factor_spatial // 2
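+ # e.g. with vae_scale_factor_spatial=16, a 64x64 latent grid corresponds to a 512x512 image.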
+
+ return height, width
+
+
+# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift
+def calculate_shift(
+ image_seq_len,
+ base_seq_len: int = 256,
+ max_seq_len: int = 4096,
+ base_shift: float = 0.5,
+ max_shift: float = 1.15,
+):
+ m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+ b = base_shift - m * base_seq_len
+ mu = image_seq_len * m + b
+ return mu
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+ scheduler,
+ num_inference_steps: Optional[int] = None,
+ device: Optional[Union[str, torch.device]] = None,
+ timesteps: Optional[List[int]] = None,
+ sigmas: Optional[List[float]] = None,
+ **kwargs,
+):
+ r"""
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+ Args:
+ scheduler (`SchedulerMixin`):
+ The scheduler to get timesteps from.
+ num_inference_steps (`int`):
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+ must be `None`.
+ device (`str` or `torch.device`, *optional*):
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+ timesteps (`List[int]`, *optional*):
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+ `num_inference_steps` and `sigmas` must be `None`.
+ sigmas (`List[float]`, *optional*):
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+ `num_inference_steps` and `timesteps` must be `None`.
+
+ Returns:
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+ second element is the number of inference steps.
+ """
+ if timesteps is not None and sigmas is not None:
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+ if timesteps is not None:
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+ if not accepts_timesteps:
+ raise ValueError(
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+ f" timestep schedules. Please check whether you are using the correct scheduler."
+ )
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ num_inference_steps = len(timesteps)
+ elif sigmas is not None:
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+ if not accept_sigmas:
+ raise ValueError(
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
+ )
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ num_inference_steps = len(timesteps)
+ else:
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ return timesteps, num_inference_steps
+
+
+class ZImageTextInputStep(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def description(self) -> str:
+ return (
+ "Input processing step that:\n"
+ " 1. Determines `batch_size` and `dtype` based on `prompt_embeds`\n"
+ " 2. Adjusts input tensor shapes based on `batch_size` (number of prompts) and `num_images_per_prompt`\n\n"
+ "All input tensors are expected to have either batch_size=1 or match the batch_size\n"
+ "of prompt_embeds. The tensors will be duplicated across the batch dimension to\n"
+ "have a final batch_size of batch_size * num_images_per_prompt."
+ )
+
+ @property
+ def expected_components(self) -> List[ComponentSpec]:
+ return [
+ ComponentSpec("transformer", ZImageTransformer2DModel),
+ ]
+
+ @property
+ def inputs(self) -> List[InputParam]:
+ return [
+ InputParam("num_images_per_prompt", default=1),
+ InputParam(
+ "prompt_embeds",
+ required=True,
+ type_hint=List[torch.Tensor],
+ description="Pre-generated text embeddings. Can be generated from text_encoder step.",
+ ),
+ InputParam(
+ "negative_prompt_embeds",
+ type_hint=List[torch.Tensor],
+ description="Pre-generated negative text embeddings. Can be generated from text_encoder step.",
+ ),
+ ]
+
+ @property
+ def intermediate_outputs(self) -> List[OutputParam]:
+ return [
+ OutputParam(
+ "batch_size",
+ type_hint=int,
+ description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt",
+ ),
+ OutputParam(
+ "dtype",
+ type_hint=torch.dtype,
+ description="Data type of model tensor inputs (determined by `transformer.dtype`)",
+ ),
+ ]
+
+ def check_inputs(self, components, block_state):
+ if block_state.prompt_embeds is not None and block_state.negative_prompt_embeds is not None:
+ if not isinstance(block_state.prompt_embeds, list):
+ raise ValueError(
+ f"`prompt_embeds` must be a list when passed directly, but got {type(block_state.prompt_embeds)}."
+ )
+ if not isinstance(block_state.negative_prompt_embeds, list):
+ raise ValueError(
+ f"`negative_prompt_embeds` must be a list when passed directly, but got {type(block_state.negative_prompt_embeds)}."
+ )
+ if len(block_state.prompt_embeds) != len(block_state.negative_prompt_embeds):
+ raise ValueError(
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same length when passed directly, but"
+ f" got: `prompt_embeds` {len(block_state.prompt_embeds)} != `negative_prompt_embeds`"
+ f" {len(block_state.negative_prompt_embeds)}."
+ )
+
+ @torch.no_grad()
+ def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+ self.check_inputs(components, block_state)
+
+ block_state.batch_size = len(block_state.prompt_embeds)
+ block_state.dtype = block_state.prompt_embeds[0].dtype
+
+ if block_state.num_images_per_prompt > 1:
+ prompt_embeds = [pe for pe in block_state.prompt_embeds for _ in range(block_state.num_images_per_prompt)]
+ block_state.prompt_embeds = prompt_embeds
+
+ if block_state.negative_prompt_embeds is not None:
+ negative_prompt_embeds = [
+ npe for npe in block_state.negative_prompt_embeds for _ in range(block_state.num_images_per_prompt)
+ ]
+ block_state.negative_prompt_embeds = negative_prompt_embeds
+
+ self.set_block_state(state, block_state)
+
+ return components, state
+
+
+class ZImageAdditionalInputsStep(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ def __init__(
+ self,
+ image_latent_inputs: List[str] = ["image_latents"],
+ additional_batch_inputs: List[str] = [],
+ ):
+ """Initialize a configurable step that standardizes the inputs for the denoising step.
+
+ This step handles common tasks to prepare inputs for the denoising step:
+ 1. For image latent inputs: uses them to derive `height`/`width` when those are not provided
+ 2. For additional_batch_inputs: expands batch dimensions to match the final batch size
+
+ This is a dynamic block that allows you to configure which inputs to process.
+
+ Args:
+ image_latent_inputs (List[str], optional): Names of image latent tensors to process.
+ These latents are used to determine height/width when those are not provided. Can be
+ a single string or list of strings. Defaults to ["image_latents"].
+ additional_batch_inputs (List[str], optional):
+ Names of additional conditional input tensors to expand batch size. These tensors will only have their
+ batch dimensions adjusted to match the final batch size. Can be a single string or list of strings.
+ Defaults to [].
+
+ Examples:
+ # Configure to process image_latents (default behavior)
+ ZImageAdditionalInputsStep()
+
+ # Configure to process multiple image latent inputs
+ ZImageAdditionalInputsStep(image_latent_inputs=["image_latents", "control_image_latents"])
+
+ # Configure to process image latents and additional batch inputs
+ ZImageAdditionalInputsStep(
+ image_latent_inputs=["image_latents"], additional_batch_inputs=["image_embeds"]
+ )
+ """
+ if not isinstance(image_latent_inputs, list):
+ image_latent_inputs = [image_latent_inputs]
+ if not isinstance(additional_batch_inputs, list):
+ additional_batch_inputs = [additional_batch_inputs]
+
+ self._image_latent_inputs = image_latent_inputs
+ self._additional_batch_inputs = additional_batch_inputs
+ super().__init__()
+
+ @property
+ def description(self) -> str:
+ # Functionality section
+ summary_section = (
+ "Input processing step that:\n"
+ " 1. For image latent inputs: Updates height/width if None\n"
+ " 2. For additional batch inputs: Expands batch dimensions to match final batch size"
+ )
+
+ # Inputs info
+ inputs_info = ""
+ if self._image_latent_inputs or self._additional_batch_inputs:
+ inputs_info = "\n\nConfigured inputs:"
+ if self._image_latent_inputs:
+ inputs_info += f"\n - Image latent inputs: {self._image_latent_inputs}"
+ if self._additional_batch_inputs:
+ inputs_info += f"\n - Additional batch inputs: {self._additional_batch_inputs}"
+
+ # Placement guidance
+ placement_section = "\n\nThis block should be placed after the encoder steps and the text input step."
+
+ return summary_section + inputs_info + placement_section
+
+ @property
+ def inputs(self) -> List[InputParam]:
+ inputs = [
+ InputParam(name="num_images_per_prompt", default=1),
+ InputParam(name="batch_size", required=True),
+ InputParam(name="height"),
+ InputParam(name="width"),
+ ]
+
+ # Add image latent inputs
+ for image_latent_input_name in self._image_latent_inputs:
+ inputs.append(InputParam(name=image_latent_input_name))
+
+ # Add additional batch inputs
+ for input_name in self._additional_batch_inputs:
+ inputs.append(InputParam(name=input_name))
+
+ return inputs
+
+ def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+
+ # Process image latent inputs (height/width calculation)
+ for image_latent_input_name in self._image_latent_inputs:
+ image_latent_tensor = getattr(block_state, image_latent_input_name)
+ if image_latent_tensor is None:
+ continue
+
+ # 1. Calculate height/width from latents
+ height, width = calculate_dimension_from_latents(image_latent_tensor, components.vae_scale_factor_spatial)
+ block_state.height = block_state.height or height
+ block_state.width = block_state.width or width
+
+ # Process additional batch inputs (only batch expansion)
+ for input_name in self._additional_batch_inputs:
+ input_tensor = getattr(block_state, input_name)
+ if input_tensor is None:
+ continue
+
+ # Only expand batch size
+ input_tensor = repeat_tensor_to_batch_size(
+ input_name=input_name,
+ input_tensor=input_tensor,
+ num_images_per_prompt=block_state.num_images_per_prompt,
+ batch_size=block_state.batch_size,
+ )
+
+ setattr(block_state, input_name, input_tensor)
+
+ self.set_block_state(state, block_state)
+ return components, state
+
+
+class ZImagePrepareLatentsStep(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def description(self) -> str:
+ return "Prepare latents step that prepares the latents for the text-to-image generation process"
+
+ @property
+ def inputs(self) -> List[InputParam]:
+ return [
+ InputParam("height", type_hint=int),
+ InputParam("width", type_hint=int),
+ InputParam("latents", type_hint=Optional[torch.Tensor]),
+ InputParam("num_images_per_prompt", type_hint=int, default=1),
+ InputParam("generator"),
+ InputParam(
+ "batch_size",
+ required=True,
+ type_hint=int,
+ description="Number of prompts, the final batch size of model inputs should be `batch_size * num_images_per_prompt`. Can be generated in input step.",
+ ),
+ InputParam("dtype", type_hint=torch.dtype, description="The dtype of the model inputs"),
+ ]
+
+ @property
+ def intermediate_outputs(self) -> List[OutputParam]:
+ return [
+ OutputParam(
+ "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process"
+ )
+ ]
+
+ def check_inputs(self, components, block_state):
+ if (block_state.height is not None and block_state.height % components.vae_scale_factor_spatial != 0) or (
+ block_state.width is not None and block_state.width % components.vae_scale_factor_spatial != 0
+ ):
+ raise ValueError(
+ f"`height` and `width` have to be divisible by {components.vae_scale_factor_spatial} but are {block_state.height} and {block_state.width}."
+ )
+
+ @staticmethod
+ # Copied from diffusers.pipelines.z_image.pipeline_z_image.ZImagePipeline.prepare_latents with self->comp
+ def prepare_latents(
+ comp,
+ batch_size,
+ num_channels_latents,
+ height,
+ width,
+ dtype,
+ device,
+ generator,
+ latents=None,
+ ):
+ height = 2 * (int(height) // (comp.vae_scale_factor * 2))
+ width = 2 * (int(width) // (comp.vae_scale_factor * 2))
+
+ shape = (batch_size, num_channels_latents, height, width)
+
+ if latents is None:
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+ else:
+ if latents.shape != shape:
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
+ latents = latents.to(device)
+ return latents
+
+ @torch.no_grad()
+ def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+ self.check_inputs(components, block_state)
+
+ device = components._execution_device
+ dtype = torch.float32
+
+ block_state.height = block_state.height or components.default_height
+ block_state.width = block_state.width or components.default_width
+
+ block_state.latents = self.prepare_latents(
+ components,
+ batch_size=block_state.batch_size * block_state.num_images_per_prompt,
+ num_channels_latents=components.num_channels_latents,
+ height=block_state.height,
+ width=block_state.width,
+ dtype=dtype,
+ device=device,
+ generator=block_state.generator,
+ latents=block_state.latents,
+ )
+
+ self.set_block_state(state, block_state)
+
+ return components, state
+
+
+class ZImageSetTimestepsStep(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def expected_components(self) -> List[ComponentSpec]:
+ return [
+ ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler),
+ ]
+
+ @property
+ def description(self) -> str:
+ return "Step that sets the scheduler's timesteps for inference. Needs to run after the prepare latents step."
+
+ @property
+ def inputs(self) -> List[InputParam]:
+ return [
+ InputParam("latents", required=True),
+ InputParam("num_inference_steps", default=9),
+ InputParam("sigmas"),
+ ]
+
+ @property
+ def intermediate_outputs(self) -> List[OutputParam]:
+ return [
+ OutputParam(
+ "timesteps", type_hint=torch.Tensor, description="The timesteps to use for the denoising process"
+ ),
+ ]
+
+ @torch.no_grad()
+ def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+ device = components._execution_device
+
+ latent_height, latent_width = block_state.latents.shape[2], block_state.latents.shape[3]
+ image_seq_len = (latent_height // 2) * (latent_width // 2) # sequence length after patchify
+
+ mu = calculate_shift(
+ image_seq_len,
+ base_seq_len=components.scheduler.config.get("base_image_seq_len", 256),
+ max_seq_len=components.scheduler.config.get("max_image_seq_len", 4096),
+ base_shift=components.scheduler.config.get("base_shift", 0.5),
+ max_shift=components.scheduler.config.get("max_shift", 1.15),
+ )
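+ # e.g. with the default config, a 128x128 latent grid (4096 tokens after 2x2 patchify) gives mu = max_shift = 1.15; 1024 tokens give mu ≈ 0.63.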
+ components.scheduler.sigma_min = 0.0
+
+ block_state.timesteps, block_state.num_inference_steps = retrieve_timesteps(
+ components.scheduler,
+ block_state.num_inference_steps,
+ device,
+ sigmas=block_state.sigmas,
+ mu=mu,
+ )
+
+ self.set_block_state(state, block_state)
+ return components, state
+
+
+class ZImageSetTimestepsWithStrengthStep(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def expected_components(self) -> List[ComponentSpec]:
+ return [
+ ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler),
+ ]
+
+ @property
+ def description(self) -> str:
+ return "Step that sets the scheduler's timesteps for inference with strength. Needs to run after the set timesteps step."
+
+ @property
+ def inputs(self) -> List[InputParam]:
+ return [
+ InputParam("timesteps", required=True),
+ InputParam("num_inference_steps", required=True),
+ InputParam("strength", default=0.6),
+ ]
+
+ def check_inputs(self, components, block_state):
+ if block_state.strength < 0.0 or block_state.strength > 1.0:
+ raise ValueError(f"Strength must be between 0.0 and 1.0, but got {block_state.strength}")
+
+ @torch.no_grad()
+ def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+ self.check_inputs(components, block_state)
+
+ init_timestep = min(block_state.num_inference_steps * block_state.strength, block_state.num_inference_steps)
+
+ t_start = int(max(block_state.num_inference_steps - init_timestep, 0))
+ timesteps = components.scheduler.timesteps[t_start * components.scheduler.order :]
+ if hasattr(components.scheduler, "set_begin_index"):
+ components.scheduler.set_begin_index(t_start * components.scheduler.order)
+
+ block_state.timesteps = timesteps
+ block_state.num_inference_steps = block_state.num_inference_steps - t_start
+
+ self.set_block_state(state, block_state)
+ return components, state
+
+
+class ZImagePrepareLatentswithImageStep(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def description(self) -> str:
+ return "Step that prepares the latents with the image condition. Needs to run after the set timesteps and prepare latents steps."
+
+ @property
+ def inputs(self) -> List[InputParam]:
+ return [
+ InputParam("latents", required=True),
+ InputParam("image_latents", required=True),
+ InputParam("timesteps", required=True),
+ ]
+
+ def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+
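+ # Noise the encoded image latents to the first kept timestep so denoising starts from a partially noised version of the input image.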
+ latent_timestep = block_state.timesteps[:1].repeat(block_state.latents.shape[0])
+ block_state.latents = components.scheduler.scale_noise(
+ block_state.image_latents, latent_timestep, block_state.latents
+ )
+
+ self.set_block_state(state, block_state)
+ return components, state
diff --git a/src/diffusers/modular_pipelines/z_image/decoders.py b/src/diffusers/modular_pipelines/z_image/decoders.py
new file mode 100644
index 0000000000..cdb6a2e5ea
--- /dev/null
+++ b/src/diffusers/modular_pipelines/z_image/decoders.py
@@ -0,0 +1,91 @@
+# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, List, Tuple, Union
+
+import numpy as np
+import PIL
+import torch
+
+from ...configuration_utils import FrozenDict
+from ...image_processor import VaeImageProcessor
+from ...models import AutoencoderKL
+from ...utils import logging
+from ..modular_pipeline import ModularPipelineBlocks, PipelineState
+from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+class ZImageVaeDecoderStep(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def expected_components(self) -> List[ComponentSpec]:
+ return [
+ ComponentSpec("vae", AutoencoderKL),
+ ComponentSpec(
+ "image_processor",
+ VaeImageProcessor,
+ config=FrozenDict({"vae_scale_factor": 8 * 2}),
+ default_creation_method="from_config",
+ ),
+ ]
+
+ @property
+ def description(self) -> str:
+ return "Step that decodes the denoised latents into images"
+
+ @property
+ def inputs(self) -> List[Tuple[str, Any]]:
+ return [
+ InputParam(
+ "latents",
+ required=True,
+ ),
+ InputParam(
+ name="output_type",
+ default="pil",
+ type_hint=str,
+ description="The type of the output images, can be 'pil', 'np', 'pt'",
+ ),
+ ]
+
+ @property
+ def intermediate_outputs(self) -> List[OutputParam]:
+ return [
+ OutputParam(
+ "images",
+ type_hint=Union[List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]],
+ description="The generated images, can be a PIL.Image.Image, torch.Tensor or a numpy array",
+ )
+ ]
+
+ @torch.no_grad()
+ def __call__(self, components, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+ vae_dtype = components.vae.dtype
+
+ latents = block_state.latents.to(vae_dtype)
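+ # Undo the latent normalization applied at encode time (the inverse of (x - shift_factor) * scaling_factor) before decoding.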
+ latents = latents / components.vae.config.scaling_factor + components.vae.config.shift_factor
+
+ block_state.images = components.vae.decode(latents, return_dict=False)[0]
+ block_state.images = components.image_processor.postprocess(
+ block_state.images, output_type=block_state.output_type
+ )
+
+ self.set_block_state(state, block_state)
+
+ return components, state
diff --git a/src/diffusers/modular_pipelines/z_image/denoise.py b/src/diffusers/modular_pipelines/z_image/denoise.py
new file mode 100644
index 0000000000..ec815f77ad
--- /dev/null
+++ b/src/diffusers/modular_pipelines/z_image/denoise.py
@@ -0,0 +1,310 @@
+# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict, List, Tuple
+
+import torch
+
+from ...configuration_utils import FrozenDict
+from ...guiders import ClassifierFreeGuidance
+from ...models import ZImageTransformer2DModel
+from ...schedulers import FlowMatchEulerDiscreteScheduler
+from ...utils import logging
+from ..modular_pipeline import (
+ BlockState,
+ LoopSequentialPipelineBlocks,
+ ModularPipelineBlocks,
+ PipelineState,
+)
+from ..modular_pipeline_utils import ComponentSpec, InputParam
+from .modular_pipeline import ZImageModularPipeline
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+class ZImageLoopBeforeDenoiser(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def description(self) -> str:
+ return (
+ "Step within the denoising loop that prepares the latent input for the denoiser. "
+ "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` "
+ "object (e.g. `ZImageDenoiseLoopWrapper`)"
+ )
+
+ @property
+ def inputs(self) -> List[InputParam]:
+ return [
+ InputParam(
+ "latents",
+ required=True,
+ type_hint=torch.Tensor,
+ description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.",
+ ),
+ InputParam(
+ "dtype",
+ required=True,
+ type_hint=torch.dtype,
+ description="The dtype of the model inputs. Can be generated in input step.",
+ ),
+ ]
+
+ @torch.no_grad()
+ def __call__(self, components: ZImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor):
+ latents = block_state.latents.unsqueeze(2).to(
+ block_state.dtype
+ ) # [batch_size, num_channels, 1, height, width]
+ block_state.latent_model_input = list(latents.unbind(dim=0)) # list of [num_channels, 1, height, width]
+
+ timestep = t.expand(latents.shape[0]).to(block_state.dtype)
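+ # Map the scheduler timestep (running 1000 -> 0) to the transformer's normalized time in [0, 1].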
+ timestep = (1000 - timestep) / 1000
+ block_state.timestep = timestep
+ return components, block_state
+
+
+class ZImageLoopDenoiser(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ def __init__(
+ self,
+ guider_input_fields: Dict[str, Any] = {"cap_feats": ("prompt_embeds", "negative_prompt_embeds")},
+ ):
+ """Initialize a denoiser block that calls the denoiser model. This block is used in Z-Image.
+
+ Args:
+ guider_input_fields: A dictionary that maps each argument expected by the denoiser model
+ (for example, "encoder_hidden_states") to data stored on 'block_state'. The value can be either:
+
+ - A tuple of strings. For instance, {"encoder_hidden_states": ("prompt_embeds",
+ "negative_prompt_embeds")} tells the guider to read `block_state.prompt_embeds` and
+ `block_state.negative_prompt_embeds` and pass them as the conditional and unconditional batches of
+ 'encoder_hidden_states'.
+ - A string. For example, {"encoder_hidden_image": "image_embeds"} makes the guider forward
+ `block_state.image_embeds` for both conditional and unconditional batches.
+ """
+ if not isinstance(guider_input_fields, dict):
+ raise ValueError(f"guider_input_fields must be a dictionary but is {type(guider_input_fields)}")
+ self._guider_input_fields = guider_input_fields
+ super().__init__()
+
+ @property
+ def expected_components(self) -> List[ComponentSpec]:
+ return [
+ ComponentSpec(
+ "guider",
+ ClassifierFreeGuidance,
+ config=FrozenDict({"guidance_scale": 5.0, "enabled": False}),
+ default_creation_method="from_config",
+ ),
+ ComponentSpec("transformer", ZImageTransformer2DModel),
+ ]
+
+ @property
+ def description(self) -> str:
+ return (
+ "Step within the denoising loop that denoises the latents with guidance. "
+ "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` "
+ "object (e.g. `ZImageDenoiseLoopWrapper`)"
+ )
+
+ @property
+ def inputs(self) -> List[Tuple[str, Any]]:
+ inputs = [
+ InputParam(
+ "num_inference_steps",
+ required=True,
+ type_hint=int,
+ description="The number of inference steps to use for the denoising process. Can be generated in set_timesteps step.",
+ ),
+ ]
+ guider_input_names = []
+ uncond_guider_input_names = []
+ for value in self._guider_input_fields.values():
+ if isinstance(value, tuple):
+ guider_input_names.append(value[0])
+ uncond_guider_input_names.append(value[1])
+ else:
+ guider_input_names.append(value)
+
+ for name in guider_input_names:
+ inputs.append(InputParam(name=name, required=True))
+ for name in uncond_guider_input_names:
+ inputs.append(InputParam(name=name))
+ return inputs
+
+ @torch.no_grad()
+ def __call__(
+ self, components: ZImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor
+ ) -> PipelineState:
+ components.guider.set_state(step=i, num_inference_steps=block_state.num_inference_steps, timestep=t)
+
+ # The guider splits model inputs into separate batches for conditional/unconditional predictions.
+ # For CFG with guider_inputs = {"encoder_hidden_states": (prompt_embeds, negative_prompt_embeds)}:
+ # you will get a guider_state with two batches:
+ # guider_state = [
+ # {"encoder_hidden_states": prompt_embeds, "__guidance_identifier__": "pred_cond"}, # conditional batch
+ # {"encoder_hidden_states": negative_prompt_embeds, "__guidance_identifier__": "pred_uncond"}, # unconditional batch
+ # ]
+ # Other guidance methods may return 1 batch (no guidance) or 3+ batches (e.g., PAG, APG).
+ guider_state = components.guider.prepare_inputs_from_block_state(block_state, self._guider_input_fields)
+
+ # run the denoiser for each guidance batch
+ for guider_state_batch in guider_state:
+ components.guider.prepare_models(components.transformer)
+ cond_kwargs = guider_state_batch.as_dict()
+
+ def _convert_dtype(v, dtype):
+ if isinstance(v, torch.Tensor):
+ return v.to(dtype)
+ elif isinstance(v, list):
+ return [_convert_dtype(t, dtype) for t in v]
+ return v
+
+ cond_kwargs = {
+ k: _convert_dtype(v, block_state.dtype)
+ for k, v in cond_kwargs.items()
+ if k in self._guider_input_fields.keys()
+ }
+
+ # Predict the noise residual
+ # store the noise_pred in guider_state_batch so that we can apply guidance across all batches
+ model_out_list = components.transformer(
+ x=block_state.latent_model_input,
+ t=block_state.timestep,
+ return_dict=False,
+ **cond_kwargs,
+ )[0]
+ noise_pred = torch.stack(model_out_list, dim=0).squeeze(2)
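+ # Negate the prediction: the transformer works in the flipped time convention set up in ZImageLoopBeforeDenoiser, while the scheduler steps in sigma space.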
+ guider_state_batch.noise_pred = -noise_pred
+ components.guider.cleanup_models(components.transformer)
+
+ # Perform guidance
+ block_state.noise_pred = components.guider(guider_state)[0]
+
+ return components, block_state
+
+
+class ZImageLoopAfterDenoiser(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def expected_components(self) -> List[ComponentSpec]:
+ return [
+ ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler),
+ ]
+
+ @property
+ def description(self) -> str:
+ return (
+ "Step within the denoising loop that updates the latents. "
+ "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` "
+ "object (e.g. `ZImageDenoiseLoopWrapper`)"
+ )
+
+ @torch.no_grad()
+ def __call__(self, components: ZImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor):
+ # Perform scheduler step using the predicted output
+ latents_dtype = block_state.latents.dtype
+ block_state.latents = components.scheduler.step(
+ block_state.noise_pred.float(),
+ t,
+ block_state.latents.float(),
+ return_dict=False,
+ )[0]
+
+ if block_state.latents.dtype != latents_dtype:
+ block_state.latents = block_state.latents.to(latents_dtype)
+
+ return components, block_state
+
+
+class ZImageDenoiseLoopWrapper(LoopSequentialPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def description(self) -> str:
+ return (
+ "Pipeline block that iteratively denoises the latents over `timesteps`. "
+ "The specific steps within each iteration can be customized with the `sub_blocks` attribute"
+ )
+
+ @property
+ def loop_expected_components(self) -> List[ComponentSpec]:
+ return [
+ ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler),
+ ]
+
+ @property
+ def loop_inputs(self) -> List[InputParam]:
+ return [
+ InputParam(
+ "timesteps",
+ required=True,
+ type_hint=torch.Tensor,
+ description="The timesteps to use for the denoising process. Can be generated in set_timesteps step.",
+ ),
+ InputParam(
+ "num_inference_steps",
+ required=True,
+ type_hint=int,
+ description="The number of inference steps to use for the denoising process. Can be generated in set_timesteps step.",
+ ),
+ ]
+
+ @torch.no_grad()
+ def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+
+ block_state.num_warmup_steps = max(
+ len(block_state.timesteps) - block_state.num_inference_steps * components.scheduler.order, 0
+ )
+
+ with self.progress_bar(total=block_state.num_inference_steps) as progress_bar:
+ for i, t in enumerate(block_state.timesteps):
+ components, block_state = self.loop_step(components, block_state, i=i, t=t)
+ if i == len(block_state.timesteps) - 1 or (
+ (i + 1) > block_state.num_warmup_steps and (i + 1) % components.scheduler.order == 0
+ ):
+ progress_bar.update()
+
+ self.set_block_state(state, block_state)
+
+ return components, state
+
+
+class ZImageDenoiseStep(ZImageDenoiseLoopWrapper):
+ block_classes = [
+ ZImageLoopBeforeDenoiser,
+ ZImageLoopDenoiser(
+ guider_input_fields={
+ "cap_feats": ("prompt_embeds", "negative_prompt_embeds"),
+ }
+ ),
+ ZImageLoopAfterDenoiser,
+ ]
+ block_names = ["before_denoiser", "denoiser", "after_denoiser"]
+
+ @property
+ def description(self) -> str:
+ return (
+ "Denoise step that iteratively denoise the latents. \n"
+ "Its loop logic is defined in `ZImageDenoiseLoopWrapper.__call__` method \n"
+ "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n"
+ " - `ZImageLoopBeforeDenoiser`\n"
+ " - `ZImageLoopDenoiser`\n"
+ " - `ZImageLoopAfterDenoiser`\n"
+ "This block supports text-to-image and image-to-image tasks for Z-Image."
+ )
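+
+
+# A minimal illustration (a sketch, not executed at import time) of how the `guider_input_fields`
+# mapping configured on `ZImageDenoiseStep` is consumed inside `ZImageLoopDenoiser`: with
+# classifier-free guidance the guider yields one conditional and one unconditional batch, so the
+# transformer is effectively invoked once per batch at each timestep, roughly as
+#
+#     transformer(x=latent_model_input, t=timestep, cap_feats=prompt_embeds)           # conditional batch
+#     transformer(x=latent_model_input, t=timestep, cap_feats=negative_prompt_embeds)  # unconditional batch
+#
+# and `components.guider` then combines the per-batch `noise_pred` outputs into the final guided prediction.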
diff --git a/src/diffusers/modular_pipelines/z_image/encoders.py b/src/diffusers/modular_pipelines/z_image/encoders.py
new file mode 100644
index 0000000000..f5769fe2de
--- /dev/null
+++ b/src/diffusers/modular_pipelines/z_image/encoders.py
@@ -0,0 +1,344 @@
+# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional, Union
+
+import PIL
+import torch
+from transformers import Qwen2Tokenizer, Qwen3Model
+
+from ...configuration_utils import FrozenDict
+from ...guiders import ClassifierFreeGuidance
+from ...image_processor import VaeImageProcessor
+from ...models import AutoencoderKL
+from ...utils import logging
+from ..modular_pipeline import ModularPipelineBlocks, PipelineState
+from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam
+from .modular_pipeline import ZImageModularPipeline
+
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+def get_qwen_prompt_embeds(
+ text_encoder: Qwen3Model,
+ tokenizer: Qwen2Tokenizer,
+ prompt: Union[str, List[str]],
+ device: torch.device,
+ max_sequence_length: int = 512,
+) -> List[torch.Tensor]:
+ prompt = [prompt] if isinstance(prompt, str) else prompt
+
+ for i, prompt_item in enumerate(prompt):
+ messages = [
+ {"role": "user", "content": prompt_item},
+ ]
+ prompt_item = tokenizer.apply_chat_template(
+ messages,
+ tokenize=False,
+ add_generation_prompt=True,
+ enable_thinking=True,
+ )
+ prompt[i] = prompt_item
+
+ text_inputs = tokenizer(
+ prompt,
+ padding="max_length",
+ max_length=max_sequence_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+
+ text_input_ids = text_inputs.input_ids.to(device)
+ prompt_masks = text_inputs.attention_mask.to(device).bool()
+
+ prompt_embeds = text_encoder(
+ input_ids=text_input_ids,
+ attention_mask=prompt_masks,
+ output_hidden_states=True,
+ ).hidden_states[-2]
+
+ prompt_embeds_list = []
+
+ for i in range(len(prompt_embeds)):
+ prompt_embeds_list.append(prompt_embeds[i][prompt_masks[i]])
+
+ return prompt_embeds_list
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
+def retrieve_latents(
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+):
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
+ return encoder_output.latent_dist.sample(generator)
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+ return encoder_output.latent_dist.mode()
+ elif hasattr(encoder_output, "latents"):
+ return encoder_output.latents
+ else:
+ raise AttributeError("Could not access latents of provided encoder_output")
+
+
+def encode_vae_image(
+ image_tensor: torch.Tensor,
+ vae: AutoencoderKL,
+ generator: torch.Generator,
+ device: torch.device,
+ dtype: torch.dtype,
+ latent_channels: int = 16,
+):
+ if not isinstance(image_tensor, torch.Tensor):
+ raise ValueError(f"Expected image_tensor to be a tensor, got {type(image_tensor)}.")
+
+ if isinstance(generator, list) and len(generator) != image_tensor.shape[0]:
+ raise ValueError(
+ f"You have passed a list of generators of length {len(generator)}, but it is not same as number of images {image_tensor.shape[0]}."
+ )
+
+ image_tensor = image_tensor.to(device=device, dtype=dtype)
+
+ if isinstance(generator, list):
+ image_latents = [
+ retrieve_latents(vae.encode(image_tensor[i : i + 1]), generator=generator[i])
+ for i in range(image_tensor.shape[0])
+ ]
+ image_latents = torch.cat(image_latents, dim=0)
+ else:
+ image_latents = retrieve_latents(vae.encode(image_tensor), generator=generator)
+
+ image_latents = (image_latents - vae.config.shift_factor) * vae.config.scaling_factor
+
+ return image_latents
+
+
+class ZImageTextEncoderStep(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def description(self) -> str:
+ return "Text Encoder step that generate text_embeddings to guide the video generation"
+
+ @property
+ def expected_components(self) -> List[ComponentSpec]:
+ return [
+ ComponentSpec("text_encoder", Qwen3Model),
+ ComponentSpec("tokenizer", Qwen2Tokenizer),
+ ComponentSpec(
+ "guider",
+ ClassifierFreeGuidance,
+ config=FrozenDict({"guidance_scale": 5.0, "enabled": False}),
+ default_creation_method="from_config",
+ ),
+ ]
+
+ @property
+ def inputs(self) -> List[InputParam]:
+ return [
+ InputParam("prompt"),
+ InputParam("negative_prompt"),
+ InputParam("max_sequence_length", default=512),
+ ]
+
+ @property
+ def intermediate_outputs(self) -> List[OutputParam]:
+ return [
+ OutputParam(
+ "prompt_embeds",
+ type_hint=List[torch.Tensor],
+ kwargs_type="denoiser_input_fields",
+ description="text embeddings used to guide the image generation",
+ ),
+ OutputParam(
+ "negative_prompt_embeds",
+ type_hint=List[torch.Tensor],
+ kwargs_type="denoiser_input_fields",
+ description="negative text embeddings used to guide the image generation",
+ ),
+ ]
+
+ @staticmethod
+ def check_inputs(block_state):
+ if block_state.prompt is not None and (
+ not isinstance(block_state.prompt, str) and not isinstance(block_state.prompt, list)
+ ):
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(block_state.prompt)}")
+
+ @staticmethod
+ def encode_prompt(
+ components,
+ prompt: str,
+ device: Optional[torch.device] = None,
+ prepare_unconditional_embeds: bool = True,
+ negative_prompt: Optional[str] = None,
+ max_sequence_length: int = 512,
+ ):
+ r"""
+ Encodes the prompt into text encoder hidden states.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ prompt to be encoded
+ device: (`torch.device`):
+ torch device
+ prepare_unconditional_embeds (`bool`):
+                whether to prepare unconditional embeddings or not
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ max_sequence_length (`int`, defaults to `512`):
+ The maximum number of text tokens to be used for the generation process.
+ """
+ device = device or components._execution_device
+ if not isinstance(prompt, list):
+ prompt = [prompt]
+ batch_size = len(prompt)
+
+ prompt_embeds = get_qwen_prompt_embeds(
+ text_encoder=components.text_encoder,
+ tokenizer=components.tokenizer,
+ prompt=prompt,
+ max_sequence_length=max_sequence_length,
+ device=device,
+ )
+
+ negative_prompt_embeds = None
+ if prepare_unconditional_embeds:
+ negative_prompt = negative_prompt or ""
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+
+ if prompt is not None and type(prompt) is not type(negative_prompt):
+ raise TypeError(
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
+ f" {type(prompt)}."
+ )
+ elif batch_size != len(negative_prompt):
+ raise ValueError(
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+ " the batch size of `prompt`."
+ )
+
+ negative_prompt_embeds = get_qwen_prompt_embeds(
+ text_encoder=components.text_encoder,
+ tokenizer=components.tokenizer,
+ prompt=negative_prompt,
+ max_sequence_length=max_sequence_length,
+ device=device,
+ )
+
+ return prompt_embeds, negative_prompt_embeds
+
+ @torch.no_grad()
+ def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState:
+ # Get inputs and intermediates
+ block_state = self.get_block_state(state)
+ self.check_inputs(block_state)
+
+ block_state.device = components._execution_device
+
+ # Encode input prompt
+ (
+ block_state.prompt_embeds,
+ block_state.negative_prompt_embeds,
+ ) = self.encode_prompt(
+ components=components,
+ prompt=block_state.prompt,
+ device=block_state.device,
+ prepare_unconditional_embeds=components.requires_unconditional_embeds,
+ negative_prompt=block_state.negative_prompt,
+ max_sequence_length=block_state.max_sequence_length,
+ )
+
+ # Add outputs
+ self.set_block_state(state, block_state)
+ return components, state
+
+
+class ZImageVaeImageEncoderStep(ModularPipelineBlocks):
+ model_name = "z-image"
+
+ @property
+ def description(self) -> str:
+ return "Vae Image Encoder step that generate condition_latents based on image to guide the image generation"
+
+ @property
+ def expected_components(self) -> List[ComponentSpec]:
+ return [
+ ComponentSpec("vae", AutoencoderKL),
+ ComponentSpec(
+ "image_processor",
+ VaeImageProcessor,
+ config=FrozenDict({"vae_scale_factor": 8 * 2}),
+ default_creation_method="from_config",
+ ),
+ ]
+
+ @property
+ def inputs(self) -> List[InputParam]:
+ return [
+ InputParam("image", type_hint=PIL.Image.Image, required=True),
+ InputParam("height"),
+ InputParam("width"),
+ InputParam("generator"),
+ ]
+
+ @property
+ def intermediate_outputs(self) -> List[OutputParam]:
+ return [
+ OutputParam(
+ "image_latents",
+ type_hint=torch.Tensor,
+ description="video latent representation with the first frame image condition",
+ ),
+ ]
+
+ @staticmethod
+ def check_inputs(components, block_state):
+ if (block_state.height is not None and block_state.height % components.vae_scale_factor_spatial != 0) or (
+ block_state.width is not None and block_state.width % components.vae_scale_factor_spatial != 0
+ ):
+ raise ValueError(
+ f"`height` and `width` have to be divisible by {components.vae_scale_factor_spatial} but are {block_state.height} and {block_state.width}."
+ )
+
+    @torch.no_grad()
+    def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState:
+ block_state = self.get_block_state(state)
+ self.check_inputs(components, block_state)
+
+ image = block_state.image
+
+ device = components._execution_device
+ dtype = torch.float32
+ vae_dtype = components.vae.dtype
+
+ image_tensor = components.image_processor.preprocess(
+ image, height=block_state.height, width=block_state.width
+ ).to(device=device, dtype=dtype)
+
+ block_state.image_latents = encode_vae_image(
+ image_tensor=image_tensor,
+ vae=components.vae,
+ generator=block_state.generator,
+ device=device,
+ dtype=vae_dtype,
+ latent_channels=components.num_channels_latents,
+ )
+
+ self.set_block_state(state, block_state)
+ return components, state
diff --git a/src/diffusers/modular_pipelines/z_image/modular_blocks.py b/src/diffusers/modular_pipelines/z_image/modular_blocks.py
new file mode 100644
index 0000000000..a7c520301a
--- /dev/null
+++ b/src/diffusers/modular_pipelines/z_image/modular_blocks.py
@@ -0,0 +1,191 @@
+# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...utils import logging
+from ..modular_pipeline import AutoPipelineBlocks, SequentialPipelineBlocks
+from ..modular_pipeline_utils import InsertableDict
+from .before_denoise import (
+ ZImageAdditionalInputsStep,
+ ZImagePrepareLatentsStep,
+ ZImagePrepareLatentswithImageStep,
+ ZImageSetTimestepsStep,
+ ZImageSetTimestepsWithStrengthStep,
+ ZImageTextInputStep,
+)
+from .decoders import ZImageVaeDecoderStep
+from .denoise import (
+ ZImageDenoiseStep,
+)
+from .encoders import (
+ ZImageTextEncoderStep,
+ ZImageVaeImageEncoderStep,
+)
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+# z-image
+# text2image
+class ZImageCoreDenoiseStep(SequentialPipelineBlocks):
+ block_classes = [
+ ZImageTextInputStep,
+ ZImagePrepareLatentsStep,
+ ZImageSetTimestepsStep,
+ ZImageDenoiseStep,
+ ]
+ block_names = ["input", "prepare_latents", "set_timesteps", "denoise"]
+
+ @property
+ def description(self):
+ return (
+ "denoise block that takes encoded conditions and runs the denoising process.\n"
+ + "This is a sequential pipeline blocks:\n"
+ + " - `ZImageTextInputStep` is used to adjust the batch size of the model inputs\n"
+ + " - `ZImagePrepareLatentsStep` is used to prepare the latents\n"
+ + " - `ZImageSetTimestepsStep` is used to set the timesteps\n"
+ + " - `ZImageDenoiseStep` is used to denoise the latents\n"
+ )
+
+
+# z-image: image2image
+## denoise
+class ZImageImage2ImageCoreDenoiseStep(SequentialPipelineBlocks):
+ block_classes = [
+ ZImageTextInputStep,
+ ZImageAdditionalInputsStep(image_latent_inputs=["image_latents"]),
+ ZImagePrepareLatentsStep,
+ ZImageSetTimestepsStep,
+ ZImageSetTimestepsWithStrengthStep,
+ ZImagePrepareLatentswithImageStep,
+ ZImageDenoiseStep,
+ ]
+ block_names = [
+ "input",
+ "additional_inputs",
+ "prepare_latents",
+ "set_timesteps",
+ "set_timesteps_with_strength",
+ "prepare_latents_with_image",
+ "denoise",
+ ]
+
+ @property
+ def description(self):
+ return (
+ "denoise block that takes encoded text and image latent conditions and runs the denoising process.\n"
+ + "This is a sequential pipeline blocks:\n"
+ + " - `ZImageTextInputStep` is used to adjust the batch size of the model inputs\n"
+ + " - `ZImageAdditionalInputsStep` is used to adjust the batch size of the latent conditions\n"
+ + " - `ZImagePrepareLatentsStep` is used to prepare the latents\n"
+ + " - `ZImageSetTimestepsStep` is used to set the timesteps\n"
+ + " - `ZImageSetTimestepsWithStrengthStep` is used to set the timesteps with strength\n"
+ + " - `ZImagePrepareLatentswithImageStep` is used to prepare the latents with image\n"
+ + " - `ZImageDenoiseStep` is used to denoise the latents\n"
+ )
+
+
+## auto blocks
+class ZImageAutoDenoiseStep(AutoPipelineBlocks):
+ block_classes = [
+ ZImageImage2ImageCoreDenoiseStep,
+ ZImageCoreDenoiseStep,
+ ]
+ block_names = ["image2image", "text2image"]
+ block_trigger_inputs = ["image_latents", None]
+
+ @property
+ def description(self) -> str:
+ return (
+ "Denoise step that iteratively denoise the latents. "
+ "This is a auto pipeline block that works for text2image and image2image tasks."
+ " - `ZImageCoreDenoiseStep` (text2image) for text2image tasks."
+ " - `ZImageImage2ImageCoreDenoiseStep` (image2image) for image2image tasks."
+ + " - if `image_latents` is provided, `ZImageImage2ImageCoreDenoiseStep` will be used.\n"
+ + " - if `image_latents` is not provided, `ZImageCoreDenoiseStep` will be used.\n"
+ )
+
+
+class ZImageAutoVaeImageEncoderStep(AutoPipelineBlocks):
+ block_classes = [ZImageVaeImageEncoderStep]
+ block_names = ["vae_image_encoder"]
+ block_trigger_inputs = ["image"]
+
+ @property
+ def description(self) -> str:
+ return "Vae Image Encoder step that encode the image to generate the image latents"
+ +"This is an auto pipeline block that works for image2image tasks."
+ +" - `ZImageVaeImageEncoderStep` is used when `image` is provided."
+ +" - if `image` is not provided, step will be skipped."
+
+
+class ZImageAutoBlocks(SequentialPipelineBlocks):
+ block_classes = [
+ ZImageTextEncoderStep,
+ ZImageAutoVaeImageEncoderStep,
+ ZImageAutoDenoiseStep,
+ ZImageVaeDecoderStep,
+ ]
+ block_names = ["text_encoder", "vae_image_encoder", "denoise", "decode"]
+
+ @property
+ def description(self) -> str:
+ return "Auto Modular pipeline for text-to-image and image-to-image using ZImage.\n"
+ +" - for text-to-image generation, all you need to provide is `prompt`\n"
+ +" - for image-to-image generation, you need to provide `image`\n"
+ +" - if `image` is not provided, step will be skipped."
+
+
+# presets
+TEXT2IMAGE_BLOCKS = InsertableDict(
+ [
+ ("text_encoder", ZImageTextEncoderStep),
+ ("input", ZImageTextInputStep),
+ ("prepare_latents", ZImagePrepareLatentsStep),
+ ("set_timesteps", ZImageSetTimestepsStep),
+ ("denoise", ZImageDenoiseStep),
+ ("decode", ZImageVaeDecoderStep),
+ ]
+)
+
+IMAGE2IMAGE_BLOCKS = InsertableDict(
+ [
+ ("text_encoder", ZImageTextEncoderStep),
+ ("vae_image_encoder", ZImageVaeImageEncoderStep),
+ ("input", ZImageTextInputStep),
+ ("additional_inputs", ZImageAdditionalInputsStep(image_latent_inputs=["image_latents"])),
+ ("prepare_latents", ZImagePrepareLatentsStep),
+ ("set_timesteps", ZImageSetTimestepsStep),
+ ("set_timesteps_with_strength", ZImageSetTimestepsWithStrengthStep),
+ ("prepare_latents_with_image", ZImagePrepareLatentswithImageStep),
+ ("denoise", ZImageDenoiseStep),
+ ("decode", ZImageVaeDecoderStep),
+ ]
+)
+
+
+AUTO_BLOCKS = InsertableDict(
+ [
+ ("text_encoder", ZImageTextEncoderStep),
+ ("vae_image_encoder", ZImageAutoVaeImageEncoderStep),
+ ("denoise", ZImageAutoDenoiseStep),
+ ("decode", ZImageVaeDecoderStep),
+ ]
+)
+
+ALL_BLOCKS = {
+ "text2image": TEXT2IMAGE_BLOCKS,
+ "image2image": IMAGE2IMAGE_BLOCKS,
+ "auto": AUTO_BLOCKS,
+}
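+
+
+# A minimal usage sketch (a non-authoritative illustration; it assumes the generic modular-pipelines
+# workflow -- `SequentialPipelineBlocks.from_blocks_dict`, `init_pipeline`, and `load_components` --
+# and a Z-Image modular checkpoint on the Hub; the repo id below is a placeholder):
+#
+#     import torch
+#     from diffusers.modular_pipelines import SequentialPipelineBlocks
+#
+#     blocks = SequentialPipelineBlocks.from_blocks_dict(TEXT2IMAGE_BLOCKS)
+#     pipe = blocks.init_pipeline("<z-image modular repo id>")
+#     pipe.load_components(torch_dtype=torch.bfloat16)
+#     image = pipe(prompt="a cat wearing sunglasses", output="images")[0]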
diff --git a/src/diffusers/modular_pipelines/z_image/modular_pipeline.py b/src/diffusers/modular_pipelines/z_image/modular_pipeline.py
new file mode 100644
index 0000000000..f1d8e53a36
--- /dev/null
+++ b/src/diffusers/modular_pipelines/z_image/modular_pipeline.py
@@ -0,0 +1,72 @@
+# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...loaders import ZImageLoraLoaderMixin
+from ...utils import logging
+from ..modular_pipeline import ModularPipeline
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+class ZImageModularPipeline(
+ ModularPipeline,
+ ZImageLoraLoaderMixin,
+):
+ """
+ A ModularPipeline for Z-Image.
+
+    > [!WARNING]
+    > This is an experimental feature and is likely to change in the future.
+ """
+
+ default_blocks_name = "ZImageAutoBlocks"
+
+ @property
+ def default_height(self):
+ return 1024
+
+ @property
+ def default_width(self):
+ return 1024
+
+ @property
+ def vae_scale_factor_spatial(self):
+ vae_scale_factor_spatial = 16
+ if hasattr(self, "image_processor") and self.image_processor is not None:
+ vae_scale_factor_spatial = self.image_processor.config.vae_scale_factor
+ return vae_scale_factor_spatial
+
+ @property
+ def vae_scale_factor(self):
+ vae_scale_factor = 8
+ if hasattr(self, "vae") and self.vae is not None:
+ vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+ return vae_scale_factor
+
+ @property
+ def num_channels_latents(self):
+ num_channels_latents = 16
+ if hasattr(self, "transformer") and self.transformer is not None:
+ num_channels_latents = self.transformer.config.in_channels
+ return num_channels_latents
+
+ @property
+ def requires_unconditional_embeds(self):
+ requires_unconditional_embeds = False
+
+ if hasattr(self, "guider") and self.guider is not None:
+ requires_unconditional_embeds = self.guider._enabled and self.guider.num_conditions > 1
+
+ return requires_unconditional_embeds
diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py
index 3d669aecf5..388551f812 100644
--- a/src/diffusers/pipelines/__init__.py
+++ b/src/diffusers/pipelines/__init__.py
@@ -404,7 +404,7 @@ else:
"Kandinsky5T2IPipeline",
"Kandinsky5I2IPipeline",
]
- _import_structure["z_image"] = ["ZImagePipeline"]
+ _import_structure["z_image"] = ["ZImageImg2ImgPipeline", "ZImagePipeline"]
_import_structure["skyreels_v2"] = [
"SkyReelsV2DiffusionForcingPipeline",
"SkyReelsV2DiffusionForcingImageToVideoPipeline",
@@ -841,7 +841,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
WuerstchenDecoderPipeline,
WuerstchenPriorPipeline,
)
- from .z_image import ZImagePipeline
+ from .z_image import ZImageImg2ImgPipeline, ZImagePipeline
try:
if not is_onnx_available():
diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py
index 044d854390..db0268a2a7 100644
--- a/src/diffusers/pipelines/auto_pipeline.py
+++ b/src/diffusers/pipelines/auto_pipeline.py
@@ -119,6 +119,7 @@ from .stable_diffusion_xl import (
)
from .wan import WanImageToVideoPipeline, WanPipeline, WanVideoToVideoPipeline
from .wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline
+from .z_image import ZImageImg2ImgPipeline, ZImagePipeline
AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict(
@@ -162,6 +163,7 @@ AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict(
("cogview4-control", CogView4ControlPipeline),
("qwenimage", QwenImagePipeline),
("qwenimage-controlnet", QwenImageControlNetPipeline),
+ ("z-image", ZImagePipeline),
]
)
@@ -189,6 +191,7 @@ AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict(
("qwenimage", QwenImageImg2ImgPipeline),
("qwenimage-edit", QwenImageEditPipeline),
("qwenimage-edit-plus", QwenImageEditPlusPipeline),
+ ("z-image", ZImageImg2ImgPipeline),
]
)
diff --git a/src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5_image2video.py b/src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5_image2video.py
index 9e9f20c79e..8c555eabba 100644
--- a/src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5_image2video.py
+++ b/src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5_image2video.py
@@ -852,6 +852,15 @@ class HunyuanVideo15ImageToVideoPipeline(DiffusionPipeline):
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timestep = t.expand(latent_model_input.shape[0]).to(latent_model_input.dtype)
+ if self.transformer.config.use_meanflow:
+ if i == len(timesteps) - 1:
+ timestep_r = torch.tensor([0.0], device=device)
+ else:
+ timestep_r = timesteps[i + 1]
+ timestep_r = timestep_r.expand(latents.shape[0]).to(latents.dtype)
+ else:
+ timestep_r = None
+
# Step 1: Collect model inputs needed for the guidance method
# conditional inputs should always be first element in the tuple
guider_inputs = {
@@ -893,6 +902,7 @@ class HunyuanVideo15ImageToVideoPipeline(DiffusionPipeline):
hidden_states=latent_model_input,
image_embeds=image_embeds,
timestep=timestep,
+ timestep_r=timestep_r,
attention_kwargs=self.attention_kwargs,
return_dict=False,
**cond_kwargs,
diff --git a/src/diffusers/pipelines/stable_diffusion/safety_checker.py b/src/diffusers/pipelines/stable_diffusion/safety_checker.py
index 16aff10259..65daafe012 100644
--- a/src/diffusers/pipelines/stable_diffusion/safety_checker.py
+++ b/src/diffusers/pipelines/stable_diffusion/safety_checker.py
@@ -17,7 +17,7 @@ import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
-from ...utils import logging
+from ...utils import is_transformers_version, logging
logger = logging.get_logger(__name__)
@@ -46,6 +46,9 @@ class StableDiffusionSafetyChecker(PreTrainedModel):
self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
+ # Model requires post_init after transformers v4.57.3
+ if is_transformers_version(">", "4.57.3"):
+ self.post_init()
@torch.no_grad()
def forward(self, clip_input, images):
diff --git a/src/diffusers/pipelines/z_image/__init__.py b/src/diffusers/pipelines/z_image/__init__.py
index f95b3e5a0b..f4342713e3 100644
--- a/src/diffusers/pipelines/z_image/__init__.py
+++ b/src/diffusers/pipelines/z_image/__init__.py
@@ -23,6 +23,7 @@ except OptionalDependencyNotAvailable:
else:
_import_structure["pipeline_output"] = ["ZImagePipelineOutput"]
_import_structure["pipeline_z_image"] = ["ZImagePipeline"]
+ _import_structure["pipeline_z_image_img2img"] = ["ZImageImg2ImgPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
@@ -35,6 +36,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
else:
from .pipeline_output import ZImagePipelineOutput
from .pipeline_z_image import ZImagePipeline
+ from .pipeline_z_image_img2img import ZImageImg2ImgPipeline
else:
import sys
diff --git a/src/diffusers/pipelines/z_image/pipeline_z_image_img2img.py b/src/diffusers/pipelines/z_image/pipeline_z_image_img2img.py
new file mode 100644
index 0000000000..2b3e80a208
--- /dev/null
+++ b/src/diffusers/pipelines/z_image/pipeline_z_image_img2img.py
@@ -0,0 +1,709 @@
+# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Union
+
+import torch
+from transformers import AutoTokenizer, PreTrainedModel
+
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import FromSingleFileMixin, ZImageLoraLoaderMixin
+from ...models.autoencoders import AutoencoderKL
+from ...models.transformers import ZImageTransformer2DModel
+from ...pipelines.pipeline_utils import DiffusionPipeline
+from ...schedulers import FlowMatchEulerDiscreteScheduler
+from ...utils import logging, replace_example_docstring
+from ...utils.torch_utils import randn_tensor
+from .pipeline_output import ZImagePipelineOutput
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ >>> import torch
+ >>> from diffusers import ZImageImg2ImgPipeline
+ >>> from diffusers.utils import load_image
+
+ >>> pipe = ZImageImg2ImgPipeline.from_pretrained("Z-a-o/Z-Image-Turbo", torch_dtype=torch.bfloat16)
+ >>> pipe.to("cuda")
+
+ >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+ >>> init_image = load_image(url).resize((1024, 1024))
+
+ >>> prompt = "A fantasy landscape with mountains and a river, detailed, vibrant colors"
+ >>> image = pipe(
+ ... prompt,
+ ... image=init_image,
+ ... strength=0.6,
+ ... num_inference_steps=9,
+ ... guidance_scale=0.0,
+ ... generator=torch.Generator("cuda").manual_seed(42),
+ ... ).images[0]
+ >>> image.save("zimage_img2img.png")
+ ```
+"""
+
+
+# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift
+def calculate_shift(
+ image_seq_len,
+ base_seq_len: int = 256,
+ max_seq_len: int = 4096,
+ base_shift: float = 0.5,
+ max_shift: float = 1.15,
+):
+ m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+ b = base_shift - m * base_seq_len
+ mu = image_seq_len * m + b
+ return mu
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
+def retrieve_latents(
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+):
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
+ return encoder_output.latent_dist.sample(generator)
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+ return encoder_output.latent_dist.mode()
+ elif hasattr(encoder_output, "latents"):
+ return encoder_output.latents
+ else:
+ raise AttributeError("Could not access latents of provided encoder_output")
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+ scheduler,
+ num_inference_steps: Optional[int] = None,
+ device: Optional[Union[str, torch.device]] = None,
+ timesteps: Optional[List[int]] = None,
+ sigmas: Optional[List[float]] = None,
+ **kwargs,
+):
+ r"""
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+ Args:
+ scheduler (`SchedulerMixin`):
+ The scheduler to get timesteps from.
+ num_inference_steps (`int`):
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+ must be `None`.
+ device (`str` or `torch.device`, *optional*):
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+ timesteps (`List[int]`, *optional*):
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+ `num_inference_steps` and `sigmas` must be `None`.
+ sigmas (`List[float]`, *optional*):
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+ `num_inference_steps` and `timesteps` must be `None`.
+
+ Returns:
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+ second element is the number of inference steps.
+ """
+ if timesteps is not None and sigmas is not None:
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+ if timesteps is not None:
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+ if not accepts_timesteps:
+ raise ValueError(
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+ f" timestep schedules. Please check whether you are using the correct scheduler."
+ )
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ num_inference_steps = len(timesteps)
+ elif sigmas is not None:
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+ if not accept_sigmas:
+ raise ValueError(
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
+ )
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ num_inference_steps = len(timesteps)
+ else:
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ return timesteps, num_inference_steps
+
+
+class ZImageImg2ImgPipeline(DiffusionPipeline, ZImageLoraLoaderMixin, FromSingleFileMixin):
+ r"""
+ The ZImage pipeline for image-to-image generation.
+
+ Args:
+ scheduler ([`FlowMatchEulerDiscreteScheduler`]):
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+ vae ([`AutoencoderKL`]):
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+ text_encoder ([`PreTrainedModel`]):
+ A text encoder model to encode text prompts.
+ tokenizer ([`AutoTokenizer`]):
+ A tokenizer to tokenize text prompts.
+ transformer ([`ZImageTransformer2DModel`]):
+ A ZImage transformer model to denoise the encoded image latents.
+ """
+
+ model_cpu_offload_seq = "text_encoder->transformer->vae"
+ _optional_components = []
+ _callback_tensor_inputs = ["latents", "prompt_embeds"]
+
+ def __init__(
+ self,
+ scheduler: FlowMatchEulerDiscreteScheduler,
+ vae: AutoencoderKL,
+ text_encoder: PreTrainedModel,
+ tokenizer: AutoTokenizer,
+ transformer: ZImageTransformer2DModel,
+ ):
+ super().__init__()
+
+ self.register_modules(
+ vae=vae,
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ scheduler=scheduler,
+ transformer=transformer,
+ )
+ self.vae_scale_factor = (
+ 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
+ )
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
+
+ # Copied from diffusers.pipelines.z_image.pipeline_z_image.ZImagePipeline.encode_prompt
+ def encode_prompt(
+ self,
+ prompt: Union[str, List[str]],
+ device: Optional[torch.device] = None,
+ do_classifier_free_guidance: bool = True,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ prompt_embeds: Optional[List[torch.FloatTensor]] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ max_sequence_length: int = 512,
+ ):
+ prompt = [prompt] if isinstance(prompt, str) else prompt
+ prompt_embeds = self._encode_prompt(
+ prompt=prompt,
+ device=device,
+ prompt_embeds=prompt_embeds,
+ max_sequence_length=max_sequence_length,
+ )
+
+ if do_classifier_free_guidance:
+ if negative_prompt is None:
+ negative_prompt = ["" for _ in prompt]
+ else:
+ negative_prompt = [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+ assert len(prompt) == len(negative_prompt)
+ negative_prompt_embeds = self._encode_prompt(
+ prompt=negative_prompt,
+ device=device,
+ prompt_embeds=negative_prompt_embeds,
+ max_sequence_length=max_sequence_length,
+ )
+ else:
+ negative_prompt_embeds = []
+ return prompt_embeds, negative_prompt_embeds
+
+ # Copied from diffusers.pipelines.z_image.pipeline_z_image.ZImagePipeline._encode_prompt
+ def _encode_prompt(
+ self,
+ prompt: Union[str, List[str]],
+ device: Optional[torch.device] = None,
+ prompt_embeds: Optional[List[torch.FloatTensor]] = None,
+ max_sequence_length: int = 512,
+ ) -> List[torch.FloatTensor]:
+ device = device or self._execution_device
+
+ if prompt_embeds is not None:
+ return prompt_embeds
+
+ if isinstance(prompt, str):
+ prompt = [prompt]
+
+ for i, prompt_item in enumerate(prompt):
+ messages = [
+ {"role": "user", "content": prompt_item},
+ ]
+ prompt_item = self.tokenizer.apply_chat_template(
+ messages,
+ tokenize=False,
+ add_generation_prompt=True,
+ enable_thinking=True,
+ )
+ prompt[i] = prompt_item
+
+ text_inputs = self.tokenizer(
+ prompt,
+ padding="max_length",
+ max_length=max_sequence_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+
+ text_input_ids = text_inputs.input_ids.to(device)
+ prompt_masks = text_inputs.attention_mask.to(device).bool()
+
+ prompt_embeds = self.text_encoder(
+ input_ids=text_input_ids,
+ attention_mask=prompt_masks,
+ output_hidden_states=True,
+ ).hidden_states[-2]
+
+ embeddings_list = []
+
+ for i in range(len(prompt_embeds)):
+ embeddings_list.append(prompt_embeds[i][prompt_masks[i]])
+
+ return embeddings_list
+
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps
+ def get_timesteps(self, num_inference_steps, strength, device):
+ # get the original timestep using init_timestep
+ init_timestep = min(num_inference_steps * strength, num_inference_steps)
+
+ t_start = int(max(num_inference_steps - init_timestep, 0))
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+ if hasattr(self.scheduler, "set_begin_index"):
+ self.scheduler.set_begin_index(t_start * self.scheduler.order)
+
+ return timesteps, num_inference_steps - t_start
+
+ def prepare_latents(
+ self,
+ image,
+ timestep,
+ batch_size,
+ num_channels_latents,
+ height,
+ width,
+ dtype,
+ device,
+ generator,
+ latents=None,
+ ):
+ height = 2 * (int(height) // (self.vae_scale_factor * 2))
+ width = 2 * (int(width) // (self.vae_scale_factor * 2))
+
+ shape = (batch_size, num_channels_latents, height, width)
+
+ if latents is not None:
+ return latents.to(device=device, dtype=dtype)
+
+ # Encode the input image
+ image = image.to(device=device, dtype=dtype)
+ if image.shape[1] != num_channels_latents:
+ if isinstance(generator, list):
+ image_latents = [
+ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
+ for i in range(image.shape[0])
+ ]
+ image_latents = torch.cat(image_latents, dim=0)
+ else:
+ image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
+
+ # Apply scaling (inverse of decoding: decode does latents/scaling_factor + shift_factor)
+ image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor
+ else:
+ image_latents = image
+
+ # Handle batch size expansion
+ if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
+ additional_image_per_prompt = batch_size // image_latents.shape[0]
+ image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
+ elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
+ raise ValueError(
+ f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
+ )
+
+ # Add noise using flow matching scale_noise
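+        # (FlowMatchEulerDiscreteScheduler.scale_noise interpolates toward noise: sigma_t * noise + (1 - sigma_t) * image_latents)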
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+ latents = self.scheduler.scale_noise(image_latents, timestep, noise)
+
+ return latents
+
+ @property
+ def guidance_scale(self):
+ return self._guidance_scale
+
+ @property
+ def do_classifier_free_guidance(self):
+ return self._guidance_scale > 1
+
+ @property
+ def joint_attention_kwargs(self):
+ return self._joint_attention_kwargs
+
+ @property
+ def num_timesteps(self):
+ return self._num_timesteps
+
+ @property
+ def interrupt(self):
+ return self._interrupt
+
+ @torch.no_grad()
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ image: PipelineImageInput = None,
+ strength: float = 0.6,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ num_inference_steps: int = 50,
+ sigmas: Optional[List[float]] = None,
+ guidance_scale: float = 5.0,
+ cfg_normalization: bool = False,
+ cfg_truncation: float = 1.0,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[List[torch.FloatTensor]] = None,
+ negative_prompt_embeds: Optional[List[torch.FloatTensor]] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ max_sequence_length: int = 512,
+ ):
+ r"""
+ Function invoked when calling the pipeline for image-to-image generation.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
+                instead.
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+ `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
+ numpy array and pytorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a
+ list of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or
+ a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`.
+ strength (`float`, *optional*, defaults to 0.6):
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
+ essentially ignores `image`.
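+                For example, `strength=0.6` with `num_inference_steps=50` runs only the final 30 denoising steps.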
+            height (`int`, *optional*):
+                The height in pixels of the generated image. If not provided, defaults to the height of the input
+                image.
+            width (`int`, *optional*):
+                The width in pixels of the generated image. If not provided, defaults to the width of the input image.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ sigmas (`List[float]`, *optional*):
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+ will be used.
+ guidance_scale (`float`, *optional*, defaults to 5.0):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+            cfg_normalization (`bool`, *optional*, defaults to `False`):
+                Whether to rescale the guided noise prediction during classifier-free guidance so that its norm does
+                not exceed the norm of the conditional prediction.
+            cfg_truncation (`float`, *optional*, defaults to 1.0):
+                Normalized sampling-progress threshold (0 at the first step, 1 at the last) above which
+                classifier-free guidance is disabled for the remaining steps. The default of `1.0` keeps guidance
+                active for all steps.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`List[torch.FloatTensor]`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`List[torch.FloatTensor]`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.z_image.ZImagePipelineOutput`] instead of a plain
+ tuple.
+ joint_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ callback_on_step_end (`Callable`, *optional*):
+ A function that calls at the end of each denoising steps during the inference. The function is called
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+ `callback_on_step_end_tensor_inputs`.
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+ `._callback_tensor_inputs` attribute of your pipeline class.
+ max_sequence_length (`int`, *optional*, defaults to 512):
+ Maximum sequence length to use with the `prompt`.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.z_image.ZImagePipelineOutput`] or `tuple`: [`~pipelines.z_image.ZImagePipelineOutput`] if
+ `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
+ generated images.
+ """
+ # 1. Check inputs and validate strength
+ if strength < 0 or strength > 1:
+ raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
+
+ # 2. Preprocess image
+ init_image = self.image_processor.preprocess(image)
+ init_image = init_image.to(dtype=torch.float32)
+
+ # Get dimensions from the preprocessed image if not specified
+ if height is None:
+ height = init_image.shape[-2]
+ if width is None:
+ width = init_image.shape[-1]
+
+ vae_scale = self.vae_scale_factor * 2
+ if height % vae_scale != 0:
+ raise ValueError(
+ f"Height must be divisible by {vae_scale} (got {height}). "
+ f"Please adjust the height to a multiple of {vae_scale}."
+ )
+ if width % vae_scale != 0:
+ raise ValueError(
+ f"Width must be divisible by {vae_scale} (got {width}). "
+ f"Please adjust the width to a multiple of {vae_scale}."
+ )
+
+ device = self._execution_device
+
+ self._guidance_scale = guidance_scale
+ self._joint_attention_kwargs = joint_attention_kwargs
+ self._interrupt = False
+ self._cfg_normalization = cfg_normalization
+ self._cfg_truncation = cfg_truncation
+
+ # 3. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = len(prompt_embeds)
+
+ # If prompt_embeds is provided and prompt is None, skip encoding
+ if prompt_embeds is not None and prompt is None:
+ if self.do_classifier_free_guidance and negative_prompt_embeds is None:
+ raise ValueError(
+ "When `prompt_embeds` is provided without `prompt`, "
+ "`negative_prompt_embeds` must also be provided for classifier-free guidance."
+ )
+ else:
+ (
+ prompt_embeds,
+ negative_prompt_embeds,
+ ) = self.encode_prompt(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ device=device,
+ max_sequence_length=max_sequence_length,
+ )
+
+ # 4. Prepare latent variables
+ num_channels_latents = self.transformer.in_channels
+
+ # Repeat prompt_embeds for num_images_per_prompt
+ if num_images_per_prompt > 1:
+ prompt_embeds = [pe for pe in prompt_embeds for _ in range(num_images_per_prompt)]
+ if self.do_classifier_free_guidance and negative_prompt_embeds:
+ negative_prompt_embeds = [npe for npe in negative_prompt_embeds for _ in range(num_images_per_prompt)]
+
+ actual_batch_size = batch_size * num_images_per_prompt
+
+ # Calculate latent dimensions for image_seq_len
+ latent_height = 2 * (int(height) // (self.vae_scale_factor * 2))
+ latent_width = 2 * (int(width) // (self.vae_scale_factor * 2))
+ image_seq_len = (latent_height // 2) * (latent_width // 2)
+
+ # 5. Prepare timesteps
+ mu = calculate_shift(
+ image_seq_len,
+ self.scheduler.config.get("base_image_seq_len", 256),
+ self.scheduler.config.get("max_image_seq_len", 4096),
+ self.scheduler.config.get("base_shift", 0.5),
+ self.scheduler.config.get("max_shift", 1.15),
+ )
+ self.scheduler.sigma_min = 0.0
+ scheduler_kwargs = {"mu": mu}
+ timesteps, num_inference_steps = retrieve_timesteps(
+ self.scheduler,
+ num_inference_steps,
+ device,
+ sigmas=sigmas,
+ **scheduler_kwargs,
+ )
+
+ # 6. Adjust timesteps based on strength
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
+ if num_inference_steps < 1:
+ raise ValueError(
+ f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline "
+ f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
+ )
+ latent_timestep = timesteps[:1].repeat(actual_batch_size)
+
+ # 7. Prepare latents from image
+ latents = self.prepare_latents(
+ init_image,
+ latent_timestep,
+ actual_batch_size,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds[0].dtype,
+ device,
+ generator,
+ latents,
+ )
+
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+ self._num_timesteps = len(timesteps)
+
+ # 8. Denoising loop
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ if self.interrupt:
+ continue
+
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+ timestep = t.expand(latents.shape[0])
+ timestep = (1000 - timestep) / 1000
+ # Normalized time for time-aware config (0 at start, 1 at end)
+ t_norm = timestep[0].item()
+
+ # Handle cfg truncation
+ current_guidance_scale = self.guidance_scale
+ if (
+ self.do_classifier_free_guidance
+ and self._cfg_truncation is not None
+ and float(self._cfg_truncation) <= 1
+ ):
+ if t_norm > self._cfg_truncation:
+ current_guidance_scale = 0.0
+
+ # Run CFG only if configured AND scale is non-zero
+ apply_cfg = self.do_classifier_free_guidance and current_guidance_scale > 0
+
+ if apply_cfg:
+ latents_typed = latents.to(self.transformer.dtype)
+ latent_model_input = latents_typed.repeat(2, 1, 1, 1)
+ prompt_embeds_model_input = prompt_embeds + negative_prompt_embeds
+ timestep_model_input = timestep.repeat(2)
+ else:
+ latent_model_input = latents.to(self.transformer.dtype)
+ prompt_embeds_model_input = prompt_embeds
+ timestep_model_input = timestep
+
+ latent_model_input = latent_model_input.unsqueeze(2)
+ latent_model_input_list = list(latent_model_input.unbind(dim=0))
+
+ model_out_list = self.transformer(
+ latent_model_input_list,
+ timestep_model_input,
+ prompt_embeds_model_input,
+ )[0]
+
+ if apply_cfg:
+ # Perform CFG
+ pos_out = model_out_list[:actual_batch_size]
+ neg_out = model_out_list[actual_batch_size:]
+
+ noise_pred = []
+ for j in range(actual_batch_size):
+ pos = pos_out[j].float()
+ neg = neg_out[j].float()
+
+ pred = pos + current_guidance_scale * (pos - neg)
+
+ # Renormalization
+ if self._cfg_normalization and float(self._cfg_normalization) > 0.0:
+ ori_pos_norm = torch.linalg.vector_norm(pos)
+ new_pos_norm = torch.linalg.vector_norm(pred)
+ max_new_norm = ori_pos_norm * float(self._cfg_normalization)
+ if new_pos_norm > max_new_norm:
+ pred = pred * (max_new_norm / new_pos_norm)
+
+ noise_pred.append(pred)
+
+ noise_pred = torch.stack(noise_pred, dim=0)
+ else:
+ noise_pred = torch.stack([t.float() for t in model_out_list], dim=0)
+
+ noise_pred = noise_pred.squeeze(2)
+ noise_pred = -noise_pred
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred.to(torch.float32), t, latents, return_dict=False)[0]
+ assert latents.dtype == torch.float32
+
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+
+ if output_type == "latent":
+ image = latents
+
+ else:
+ latents = latents.to(self.vae.dtype)
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+
+ image = self.vae.decode(latents, return_dict=False)[0]
+ image = self.image_processor.postprocess(image, output_type=output_type)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image,)
+
+ return ZImagePipelineOutput(images=image)
diff --git a/src/diffusers/quantizers/modelopt/modelopt_quantizer.py b/src/diffusers/quantizers/modelopt/modelopt_quantizer.py
index 534f752321..7312036f52 100644
--- a/src/diffusers/quantizers/modelopt/modelopt_quantizer.py
+++ b/src/diffusers/quantizers/modelopt/modelopt_quantizer.py
@@ -27,7 +27,7 @@ logger = logging.get_logger(__name__)
class NVIDIAModelOptQuantizer(DiffusersQuantizer):
r"""
- Diffusers Quantizer for TensorRT Model Optimizer
+    Diffusers Quantizer for NVIDIA Model Optimizer
"""
use_keep_in_fp32_modules = True
diff --git a/src/diffusers/schedulers/scheduling_deis_multistep.py b/src/diffusers/schedulers/scheduling_deis_multistep.py
index 09ce338a92..b7d64fc00b 100644
--- a/src/diffusers/schedulers/scheduling_deis_multistep.py
+++ b/src/diffusers/schedulers/scheduling_deis_multistep.py
@@ -84,33 +84,35 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
methods the library implements for all schedulers such as loading and saving.
Args:
- num_train_timesteps (`int`, defaults to 1000):
+ num_train_timesteps (`int`, defaults to `1000`):
The number of diffusion steps to train the model.
- beta_start (`float`, defaults to 0.0001):
+ beta_start (`float`, defaults to `0.0001`):
The starting `beta` value of inference.
- beta_end (`float`, defaults to 0.02):
+ beta_end (`float`, defaults to `0.02`):
The final `beta` value.
- beta_schedule (`str`, defaults to `"linear"`):
+ beta_schedule (`"linear"`, `"scaled_linear"`, or `"squaredcos_cap_v2"`, defaults to `"linear"`):
The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
- trained_betas (`np.ndarray`, *optional*):
+ trained_betas (`np.ndarray` or `List[float]`, *optional*):
Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
- solver_order (`int`, defaults to 2):
+ solver_order (`int`, defaults to `2`):
The DEIS order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided
sampling, and `solver_order=3` for unconditional sampling.
- prediction_type (`str`, defaults to `epsilon`):
+ prediction_type (`"epsilon"`, `"sample"`, `"v_prediction"`, or `"flow_prediction"`, defaults to `"epsilon"`):
Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
- `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
- Video](https://huggingface.co/papers/2210.02303) paper).
+ `sample` (directly predicts the noisy sample), `v_prediction` (see section 2.4 of [Imagen
+ Video](https://huggingface.co/papers/2210.02303) paper), or `flow_prediction`.
thresholding (`bool`, defaults to `False`):
Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
as Stable Diffusion.
- dynamic_thresholding_ratio (`float`, defaults to 0.995):
+ dynamic_thresholding_ratio (`float`, defaults to `0.995`):
The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
- sample_max_value (`float`, defaults to 1.0):
+ sample_max_value (`float`, defaults to `1.0`):
The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
- algorithm_type (`str`, defaults to `deis`):
+ algorithm_type (`"deis"`, defaults to `"deis"`):
The algorithm type for the solver.
+ solver_type (`"logrho"`, defaults to `"logrho"`):
+ The solver type for DEIS; only `logrho` is currently supported.
lower_order_final (`bool`, defaults to `True`):
Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps.
use_karras_sigmas (`bool`, *optional*, defaults to `False`):
@@ -121,11 +123,19 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
use_beta_sigmas (`bool`, *optional*, defaults to `False`):
Whether to use beta sigmas for step sizes in the noise schedule during the sampling process. Refer to [Beta
Sampling is All You Need](https://huggingface.co/papers/2407.12173) for more information.
- timestep_spacing (`str`, defaults to `"linspace"`):
+ use_flow_sigmas (`bool`, *optional*, defaults to `False`):
+ Whether to use flow sigmas for step sizes in the noise schedule during the sampling process.
+ flow_shift (`float`, *optional*, defaults to `1.0`):
+ The flow shift parameter for flow-based models.
+ timestep_spacing (`"linspace"`, `"leading"`, or `"trailing"`, defaults to `"linspace"`):
The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
- steps_offset (`int`, defaults to 0):
+ steps_offset (`int`, defaults to `0`):
An offset added to the inference steps, as required by some model families.
+ use_dynamic_shifting (`bool`, defaults to `False`):
+ Whether to use dynamic shifting for the noise schedule.
+ time_shift_type (`"exponential"`, defaults to `"exponential"`):
+ The type of time shift applied when `use_dynamic_shifting=True`; only `exponential` is supported.
"""
_compatibles = [e.name for e in KarrasDiffusionSchedulers]
@@ -137,29 +147,38 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
- beta_schedule: str = "linear",
- trained_betas: Optional[np.ndarray] = None,
+ beta_schedule: Literal["linear", "scaled_linear", "squaredcos_cap_v2"] = "linear",
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
solver_order: int = 2,
- prediction_type: str = "epsilon",
+ prediction_type: Literal["epsilon", "sample", "v_prediction", "flow_prediction"] = "epsilon",
thresholding: bool = False,
dynamic_thresholding_ratio: float = 0.995,
sample_max_value: float = 1.0,
- algorithm_type: str = "deis",
- solver_type: str = "logrho",
+ algorithm_type: Literal["deis"] = "deis",
+ solver_type: Literal["logrho"] = "logrho",
lower_order_final: bool = True,
use_karras_sigmas: Optional[bool] = False,
use_exponential_sigmas: Optional[bool] = False,
use_beta_sigmas: Optional[bool] = False,
use_flow_sigmas: Optional[bool] = False,
flow_shift: Optional[float] = 1.0,
- timestep_spacing: str = "linspace",
+ timestep_spacing: Literal["linspace", "leading", "trailing"] = "linspace",
steps_offset: int = 0,
use_dynamic_shifting: bool = False,
- time_shift_type: str = "exponential",
- ):
+ time_shift_type: Literal["exponential"] = "exponential",
+ ) -> None:
if self.config.use_beta_sigmas and not is_scipy_available():
raise ImportError("Make sure to install scipy if you want to use beta sigmas.")
- if sum([self.config.use_beta_sigmas, self.config.use_exponential_sigmas, self.config.use_karras_sigmas]) > 1:
+ if (
+ sum(
+ [
+ self.config.use_beta_sigmas,
+ self.config.use_exponential_sigmas,
+ self.config.use_karras_sigmas,
+ ]
+ )
+ > 1
+ ):
raise ValueError(
"Only one of `config.use_beta_sigmas`, `config.use_exponential_sigmas`, `config.use_karras_sigmas` can be used."
)
@@ -169,7 +188,15 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
- self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
+ self.betas = (
+ torch.linspace(
+ beta_start**0.5,
+ beta_end**0.5,
+ num_train_timesteps,
+ dtype=torch.float32,
+ )
+ ** 2
+ )
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
@@ -211,21 +238,21 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
@property
- def step_index(self):
+ def step_index(self) -> Optional[int]:
"""
The index counter for current timestep. It will increase 1 after each scheduler step.
"""
return self._step_index
@property
- def begin_index(self):
+ def begin_index(self) -> Optional[int]:
"""
The index for the first timestep. It should be set from pipeline with `set_begin_index` method.
"""
return self._begin_index
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
- def set_begin_index(self, begin_index: int = 0):
+ def set_begin_index(self, begin_index: int = 0) -> None:
"""
Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
@@ -236,8 +263,11 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
self._begin_index = begin_index
def set_timesteps(
- self, num_inference_steps: int, device: Union[str, torch.device] = None, mu: Optional[float] = None
- ):
+ self,
+ num_inference_steps: int,
+ device: Union[str, torch.device] = None,
+ mu: Optional[float] = None,
+ ) -> None:
"""
Sets the discrete timesteps used for the diffusion chain (to be run before inference).
@@ -246,6 +276,9 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
The number of diffusion steps used when generating samples with a pre-trained model.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+ mu (`float`, *optional*):
+ The mu parameter for dynamic shifting. Only used when `use_dynamic_shifting=True` and
+ `time_shift_type="exponential"`.
"""
if mu is not None:
assert self.config.use_dynamic_shifting and self.config.time_shift_type == "exponential"
@@ -363,7 +396,7 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
return sample
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
- def _sigma_to_t(self, sigma, log_sigmas):
+ def _sigma_to_t(self, sigma: np.ndarray, log_sigmas: np.ndarray) -> np.ndarray:
"""
Convert sigma values to corresponding timestep values through interpolation.
@@ -400,7 +433,7 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
return t
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t
- def _sigma_to_alpha_sigma_t(self, sigma):
+ def _sigma_to_alpha_sigma_t(self, sigma: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Convert sigma values to alpha_t and sigma_t values.
@@ -422,7 +455,7 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
return alpha_t, sigma_t
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
- def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
+ def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
"""
Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
Models](https://huggingface.co/papers/2206.00364).
@@ -648,7 +681,10 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
"Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
)
- sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index]
+ sigma_t, sigma_s = (
+ self.sigmas[self.step_index + 1],
+ self.sigmas[self.step_index],
+ )
alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s)
lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
@@ -714,7 +750,11 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
m0, m1 = model_output_list[-1], model_output_list[-2]
- rho_t, rho_s0, rho_s1 = sigma_t / alpha_t, sigma_s0 / alpha_s0, sigma_s1 / alpha_s1
+ rho_t, rho_s0, rho_s1 = (
+ sigma_t / alpha_t,
+ sigma_s0 / alpha_s0,
+ sigma_s1 / alpha_s1,
+ )
if self.config.algorithm_type == "deis":
@@ -854,7 +894,7 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
return step_index
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index
- def _init_step_index(self, timestep):
+ def _init_step_index(self, timestep: Union[int, torch.Tensor]) -> None:
"""
Initialize the step_index counter for the scheduler.
@@ -884,18 +924,17 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
Args:
model_output (`torch.Tensor`):
The direct output from learned diffusion model.
- timestep (`int`):
+ timestep (`int` or `torch.Tensor`):
The current discrete timestep in the diffusion chain.
sample (`torch.Tensor`):
A current instance of a sample created by the diffusion process.
- return_dict (`bool`):
+ return_dict (`bool`, defaults to `True`):
Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`.
Returns:
[`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a
tuple is returned where the first element is the sample tensor.
-
"""
if self.num_inference_steps is None:
raise ValueError(
@@ -1000,5 +1039,5 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
noisy_samples = alpha_t * original_samples + sigma_t * noise
return noisy_samples
- def __len__(self):
+ def __len__(self) -> int:
return self.config.num_train_timesteps
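The flow-matching options documented above for `DEISMultistepScheduler` can be combined as in the following sketch; the concrete values (30 steps, `flow_shift=3.0`) are illustrative assumptions.

```python
from diffusers import DEISMultistepScheduler

# Flow-matching configuration using the newly documented options (values are illustrative).
scheduler = DEISMultistepScheduler(
    solver_order=2,
    prediction_type="flow_prediction",
    use_flow_sigmas=True,
    flow_shift=3.0,
)
scheduler.set_timesteps(num_inference_steps=30)
print(scheduler.timesteps[:5], scheduler.sigmas[:5])
```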
diff --git a/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py b/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py
index 55c9fb6e73..4916e1abb5 100644
--- a/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py
+++ b/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py
@@ -86,42 +86,42 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
methods the library implements for all schedulers such as loading and saving.
Args:
- num_train_timesteps (`int`, defaults to 1000):
+ num_train_timesteps (`int`, defaults to `1000`):
The number of diffusion steps to train the model.
- beta_start (`float`, defaults to 0.0001):
+ beta_start (`float`, defaults to `0.0001`):
The starting `beta` value of inference.
- beta_end (`float`, defaults to 0.02):
+ beta_end (`float`, defaults to `0.02`):
The final `beta` value.
- beta_schedule (`str`, defaults to `"linear"`):
+ beta_schedule (`"linear"`, `"scaled_linear"`, or `"squaredcos_cap_v2"`, defaults to `"linear"`):
The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
- trained_betas (`np.ndarray`, *optional*):
+ trained_betas (`np.ndarray` or `List[float]`, *optional*):
Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
- solver_order (`int`, defaults to 2):
+ solver_order (`int`, defaults to `2`):
The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided
sampling, and `solver_order=3` for unconditional sampling.
- prediction_type (`str`, defaults to `epsilon`, *optional*):
+ prediction_type (`"epsilon"`, `"sample"`, `"v_prediction"`, or `"flow_prediction"`, defaults to `"epsilon"`):
Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
- `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
- Video](https://huggingface.co/papers/2210.02303) paper).
+ `sample` (directly predicts the noisy sample), `v_prediction` (see section 2.4 of [Imagen
+ Video](https://huggingface.co/papers/2210.02303) paper), or `flow_prediction`.
thresholding (`bool`, defaults to `False`):
Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
as Stable Diffusion.
- dynamic_thresholding_ratio (`float`, defaults to 0.995):
+ dynamic_thresholding_ratio (`float`, defaults to `0.995`):
The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
- sample_max_value (`float`, defaults to 1.0):
+ sample_max_value (`float`, defaults to `1.0`):
The threshold value for dynamic thresholding. Valid only when `thresholding=True` and
`algorithm_type="dpmsolver++"`.
- algorithm_type (`str`, defaults to `dpmsolver++`):
- Algorithm type for the solver; can be `dpmsolver` or `dpmsolver++` or `sde-dpmsolver++`. The `dpmsolver`
+ algorithm_type (`"dpmsolver"`, `"dpmsolver++"`, or `"sde-dpmsolver++"`, defaults to `"dpmsolver++"`):
+ Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, or `sde-dpmsolver++`. The `dpmsolver`
type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) paper, and the
`dpmsolver++` type implements the algorithms in the [DPMSolver++](https://huggingface.co/papers/2211.01095)
paper. It is recommended to use `dpmsolver++` or `sde-dpmsolver++` with `solver_order=2` for guided
sampling like in Stable Diffusion.
- solver_type (`str`, defaults to `midpoint`):
+ solver_type (`"midpoint"` or `"heun"`, defaults to `"midpoint"`):
Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the
sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers.
- lower_order_final (`bool`, defaults to `True`):
+ lower_order_final (`bool`, defaults to `False`):
Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can
stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10.
use_karras_sigmas (`bool`, *optional*, defaults to `False`):
@@ -132,15 +132,23 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
use_beta_sigmas (`bool`, *optional*, defaults to `False`):
Whether to use beta sigmas for step sizes in the noise schedule during the sampling process. Refer to [Beta
Sampling is All You Need](https://huggingface.co/papers/2407.12173) for more information.
- final_sigmas_type (`str`, *optional*, defaults to `"zero"`):
+ use_flow_sigmas (`bool`, *optional*, defaults to `False`):
+ Whether to use flow sigmas for step sizes in the noise schedule during the sampling process.
+ flow_shift (`float`, *optional*, defaults to `1.0`):
+ The flow shift parameter for flow-based models.
+ final_sigmas_type (`"zero"` or `"sigma_min"`, *optional*, defaults to `"zero"`):
The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final
- sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0.
+ sigma is the same as the last sigma in the training schedule. If `"zero"`, the final sigma is set to 0.
lambda_min_clipped (`float`, defaults to `-inf`):
Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the
cosine (`squaredcos_cap_v2`) noise schedule.
- variance_type (`str`, *optional*):
- Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output
- contains the predicted Gaussian variance.
+ variance_type (`"learned"` or `"learned_range"`, *optional*):
+ Set to `"learned"` or `"learned_range"` for diffusion models that predict variance. If set, the model's
+ output contains the predicted Gaussian variance.
+ use_dynamic_shifting (`bool`, defaults to `False`):
+ Whether to use dynamic shifting for the noise schedule.
+ time_shift_type (`"exponential"`, defaults to `"exponential"`):
+ The type of time shift applied when `use_dynamic_shifting=True`; only `exponential` is supported.
"""
_compatibles = [e.name for e in KarrasDiffusionSchedulers]
@@ -152,27 +160,27 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
- beta_schedule: str = "linear",
- trained_betas: Optional[np.ndarray] = None,
+ beta_schedule: Literal["linear", "scaled_linear", "squaredcos_cap_v2"] = "linear",
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
solver_order: int = 2,
- prediction_type: str = "epsilon",
+ prediction_type: Literal["epsilon", "sample", "v_prediction", "flow_prediction"] = "epsilon",
thresholding: bool = False,
dynamic_thresholding_ratio: float = 0.995,
sample_max_value: float = 1.0,
- algorithm_type: str = "dpmsolver++",
- solver_type: str = "midpoint",
+ algorithm_type: Literal["dpmsolver", "dpmsolver++", "sde-dpmsolver++"] = "dpmsolver++",
+ solver_type: Literal["midpoint", "heun"] = "midpoint",
lower_order_final: bool = False,
use_karras_sigmas: Optional[bool] = False,
use_exponential_sigmas: Optional[bool] = False,
use_beta_sigmas: Optional[bool] = False,
use_flow_sigmas: Optional[bool] = False,
flow_shift: Optional[float] = 1.0,
- final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min"
+ final_sigmas_type: Optional[Literal["zero", "sigma_min"]] = "zero",
lambda_min_clipped: float = -float("inf"),
- variance_type: Optional[str] = None,
+ variance_type: Optional[Literal["learned", "learned_range"]] = None,
use_dynamic_shifting: bool = False,
- time_shift_type: str = "exponential",
- ):
+ time_shift_type: Literal["exponential"] = "exponential",
+ ) -> None:
if self.config.use_beta_sigmas and not is_scipy_available():
raise ImportError("Make sure to install scipy if you want to use beta sigmas.")
if sum([self.config.use_beta_sigmas, self.config.use_exponential_sigmas, self.config.use_karras_sigmas]) > 1:
@@ -242,6 +250,10 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
Args:
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model.
+
+ Returns:
+ `List[int]`:
+ The list of solver orders for each timestep.
"""
steps = num_inference_steps
order = self.config.solver_order
@@ -276,21 +288,29 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
return orders
@property
- def step_index(self):
+ def step_index(self) -> Optional[int]:
"""
The index counter for current timestep. It will increase 1 after each scheduler step.
+
+ Returns:
+ `int` or `None`:
+ The current step index.
"""
return self._step_index
@property
- def begin_index(self):
+ def begin_index(self) -> Optional[int]:
"""
The index for the first timestep. It should be set from pipeline with `set_begin_index` method.
+
+ Returns:
+ `int` or `None`:
+ The begin index.
"""
return self._begin_index
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
- def set_begin_index(self, begin_index: int = 0):
+ def set_begin_index(self, begin_index: int = 0) -> None:
"""
Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
@@ -302,19 +322,21 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
def set_timesteps(
self,
- num_inference_steps: int = None,
- device: Union[str, torch.device] = None,
+ num_inference_steps: Optional[int] = None,
+ device: Optional[Union[str, torch.device]] = None,
mu: Optional[float] = None,
timesteps: Optional[List[int]] = None,
- ):
+ ) -> None:
"""
Sets the discrete timesteps used for the diffusion chain (to be run before inference).
Args:
- num_inference_steps (`int`):
+ num_inference_steps (`int`, *optional*):
The number of diffusion steps used when generating samples with a pre-trained model.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+ mu (`float`, *optional*):
+ The mu parameter for dynamic shifting. Only used when `use_dynamic_shifting=True` and `time_shift_type="exponential"`.
timesteps (`List[int]`, *optional*):
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
timestep spacing strategy of equal spacing between timesteps schedule is used. If `timesteps` is
@@ -453,7 +475,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
return sample
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
- def _sigma_to_t(self, sigma, log_sigmas):
+ def _sigma_to_t(self, sigma: np.ndarray, log_sigmas: np.ndarray) -> np.ndarray:
"""
Convert sigma values to corresponding timestep values through interpolation.
@@ -490,7 +512,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
return t
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t
- def _sigma_to_alpha_sigma_t(self, sigma):
+ def _sigma_to_alpha_sigma_t(self, sigma: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Convert sigma values to alpha_t and sigma_t values.
@@ -512,7 +534,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
return alpha_t, sigma_t
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
- def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
+ def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
"""
Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
Models](https://huggingface.co/papers/2206.00364).
@@ -637,7 +659,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
self,
model_output: torch.Tensor,
*args,
- sample: torch.Tensor = None,
+ sample: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
"""
@@ -733,7 +755,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
self,
model_output: torch.Tensor,
*args,
- sample: torch.Tensor = None,
+ sample: Optional[torch.Tensor] = None,
noise: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
@@ -797,7 +819,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
self,
model_output_list: List[torch.Tensor],
*args,
- sample: torch.Tensor = None,
+ sample: Optional[torch.Tensor] = None,
noise: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
@@ -908,7 +930,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
self,
model_output_list: List[torch.Tensor],
*args,
- sample: torch.Tensor = None,
+ sample: Optional[torch.Tensor] = None,
noise: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
@@ -1030,8 +1052,8 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
self,
model_output_list: List[torch.Tensor],
*args,
- sample: torch.Tensor = None,
- order: int = None,
+ sample: Optional[torch.Tensor] = None,
+ order: Optional[int] = None,
noise: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
@@ -1125,7 +1147,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
return step_index
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index
- def _init_step_index(self, timestep):
+ def _init_step_index(self, timestep: Union[int, torch.Tensor]) -> None:
"""
Initialize the step_index counter for the scheduler.
@@ -1146,7 +1168,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
model_output: torch.Tensor,
timestep: Union[int, torch.Tensor],
sample: torch.Tensor,
- generator=None,
+ generator: Optional[torch.Generator] = None,
return_dict: bool = True,
) -> Union[SchedulerOutput, Tuple]:
"""
@@ -1156,11 +1178,13 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
Args:
model_output (`torch.Tensor`):
The direct output from learned diffusion model.
- timestep (`int`):
+ timestep (`int` or `torch.Tensor`):
The current discrete timestep in the diffusion chain.
sample (`torch.Tensor`):
A current instance of a sample created by the diffusion process.
- return_dict (`bool`):
+ generator (`torch.Generator`, *optional*):
+ A random number generator for stochastic sampling.
+ return_dict (`bool`, defaults to `True`):
Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`.
Returns:
@@ -1277,5 +1301,5 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
noisy_samples = alpha_t * original_samples + sigma_t * noise
return noisy_samples
- def __len__(self):
+ def __len__(self) -> int:
return self.config.num_train_timesteps
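As a quick sanity check of the options typed above for `DPMSolverSinglestepScheduler`, a sketch that instantiates the solver and inspects the per-step order list returned by `get_order_list` (the step count is an arbitrary assumption).

```python
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(
    algorithm_type="dpmsolver++",
    solver_type="midpoint",
    final_sigmas_type="zero",
    lower_order_final=False,
)
# The singlestep solver groups steps into order-1/2/3 chunks; this prints the grouping for 12 steps.
print(scheduler.get_order_list(num_inference_steps=12))
```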
diff --git a/src/diffusers/schedulers/scheduling_unipc_multistep.py b/src/diffusers/schedulers/scheduling_unipc_multistep.py
index 6800c12201..689c6a0635 100644
--- a/src/diffusers/schedulers/scheduling_unipc_multistep.py
+++ b/src/diffusers/schedulers/scheduling_unipc_multistep.py
@@ -77,7 +77,7 @@ def betas_for_alpha_bar(
# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
-def rescale_zero_terminal_snr(betas):
+def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor:
"""
Rescales betas to have zero terminal SNR Based on https://huggingface.co/papers/2305.08891 (Algorithm 1)
@@ -127,19 +127,19 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
The starting `beta` value of inference.
beta_end (`float`, defaults to 0.02):
The final `beta` value.
- beta_schedule (`str`, defaults to `"linear"`):
+ beta_schedule (`"linear"`, `"scaled_linear"`, or `"squaredcos_cap_v2"`, defaults to `"linear"`):
The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
trained_betas (`np.ndarray`, *optional*):
Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
- solver_order (`int`, default `2`):
+ solver_order (`int`, defaults to `2`):
The UniPC order which can be any positive integer. The effective order of accuracy is `solver_order + 1`
due to the UniC. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for
unconditional sampling.
- prediction_type (`str`, defaults to `epsilon`, *optional*):
+ prediction_type (`"epsilon"`, `"sample"`, `"v_prediction"`, or `"flow_prediction"`, defaults to `"epsilon"`, *optional*):
Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
- `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
- Video](https://huggingface.co/papers/2210.02303) paper).
+ `sample` (directly predicts the noisy sample), `v_prediction` (see section 2.4 of [Imagen
+ Video](https://huggingface.co/papers/2210.02303) paper), or `flow_prediction`.
thresholding (`bool`, defaults to `False`):
Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
as Stable Diffusion.
@@ -149,7 +149,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `predict_x0=True`.
predict_x0 (`bool`, defaults to `True`):
Whether to use the updating algorithm on the predicted x0.
- solver_type (`str`, default `bh2`):
+ solver_type (`"bh1"` or `"bh2"`, defaults to `"bh2"`):
Solver type for UniPC. It is recommended to use `bh1` for unconditional sampling when steps < 10, and `bh2`
otherwise.
lower_order_final (`bool`, default `True`):
@@ -171,12 +171,12 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
Sampling is All You Need](https://huggingface.co/papers/2407.12173) for more information.
use_flow_sigmas (`bool`, *optional*, defaults to `False`):
Whether to use flow sigmas for step sizes in the noise schedule during the sampling process.
- timestep_spacing (`str`, defaults to `"linspace"`):
+ timestep_spacing (`"linspace"`, `"leading"`, or `"trailing"`, defaults to `"linspace"`):
The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
steps_offset (`int`, defaults to 0):
An offset added to the inference steps, as required by some model families.
- final_sigmas_type (`str`, defaults to `"zero"`):
+ final_sigmas_type (`"zero"` or `"sigma_min"`, defaults to `"zero"`):
The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final
sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0.
rescale_betas_zero_snr (`bool`, defaults to `False`):
@@ -194,30 +194,30 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
- beta_schedule: str = "linear",
+ beta_schedule: Literal["linear", "scaled_linear", "squaredcos_cap_v2"] = "linear",
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
solver_order: int = 2,
- prediction_type: str = "epsilon",
+ prediction_type: Literal["epsilon", "sample", "v_prediction", "flow_prediction"] = "epsilon",
thresholding: bool = False,
dynamic_thresholding_ratio: float = 0.995,
sample_max_value: float = 1.0,
predict_x0: bool = True,
- solver_type: str = "bh2",
+ solver_type: Literal["bh1", "bh2"] = "bh2",
lower_order_final: bool = True,
disable_corrector: List[int] = [],
- solver_p: SchedulerMixin = None,
+ solver_p: Optional[SchedulerMixin] = None,
use_karras_sigmas: Optional[bool] = False,
use_exponential_sigmas: Optional[bool] = False,
use_beta_sigmas: Optional[bool] = False,
use_flow_sigmas: Optional[bool] = False,
flow_shift: Optional[float] = 1.0,
- timestep_spacing: str = "linspace",
+ timestep_spacing: Literal["linspace", "leading", "trailing"] = "linspace",
steps_offset: int = 0,
- final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min"
+ final_sigmas_type: Optional[Literal["zero", "sigma_min"]] = "zero",
rescale_betas_zero_snr: bool = False,
use_dynamic_shifting: bool = False,
- time_shift_type: str = "exponential",
- ):
+ time_shift_type: Literal["exponential"] = "exponential",
+ ) -> None:
if self.config.use_beta_sigmas and not is_scipy_available():
raise ImportError("Make sure to install scipy if you want to use beta sigmas.")
if sum([self.config.use_beta_sigmas, self.config.use_exponential_sigmas, self.config.use_karras_sigmas]) > 1:
@@ -279,21 +279,21 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
@property
- def step_index(self):
+ def step_index(self) -> Optional[int]:
"""
The index counter for current timestep. It will increase 1 after each scheduler step.
"""
return self._step_index
@property
- def begin_index(self):
+ def begin_index(self) -> Optional[int]:
"""
The index for the first timestep. It should be set from pipeline with `set_begin_index` method.
"""
return self._begin_index
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
- def set_begin_index(self, begin_index: int = 0):
+ def set_begin_index(self, begin_index: int = 0) -> None:
"""
Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
@@ -304,8 +304,8 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
self._begin_index = begin_index
def set_timesteps(
- self, num_inference_steps: int, device: Union[str, torch.device] = None, mu: Optional[float] = None
- ):
+ self, num_inference_steps: int, device: Optional[Union[str, torch.device]] = None, mu: Optional[float] = None
+ ) -> None:
"""
Sets the discrete timesteps used for the diffusion chain (to be run before inference).
@@ -314,6 +314,8 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
The number of diffusion steps used when generating samples with a pre-trained model.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+ mu (`float`, *optional*):
+ The mu parameter for dynamic shifting. Only used when `use_dynamic_shifting=True` and `time_shift_type="exponential"`.
"""
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://huggingface.co/papers/2305.08891
if mu is not None:
@@ -475,7 +477,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
return sample
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
- def _sigma_to_t(self, sigma, log_sigmas):
+ def _sigma_to_t(self, sigma: np.ndarray, log_sigmas: np.ndarray) -> np.ndarray:
"""
Convert sigma values to corresponding timestep values through interpolation.
@@ -512,7 +514,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
return t
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t
- def _sigma_to_alpha_sigma_t(self, sigma):
+ def _sigma_to_alpha_sigma_t(self, sigma: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Convert sigma values to alpha_t and sigma_t values.
@@ -534,7 +536,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
return alpha_t, sigma_t
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
- def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
+ def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
"""
Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
Models](https://huggingface.co/papers/2206.00364).
@@ -1030,7 +1032,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
return step_index
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index
- def _init_step_index(self, timestep):
+ def _init_step_index(self, timestep: Union[int, torch.Tensor]) -> None:
"""
Initialize the step_index counter for the scheduler.
@@ -1060,11 +1062,11 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
Args:
model_output (`torch.Tensor`):
The direct output from learned diffusion model.
- timestep (`int`):
+ timestep (`int` or `torch.Tensor`):
The current discrete timestep in the diffusion chain.
sample (`torch.Tensor`):
A current instance of a sample created by the diffusion process.
- return_dict (`bool`):
+ return_dict (`bool`, defaults to `True`):
Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`.
Returns:
@@ -1192,5 +1194,5 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
noisy_samples = alpha_t * original_samples + sigma_t * noise
return noisy_samples
- def __len__(self):
+ def __len__(self) -> int:
return self.config.num_train_timesteps
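The newly documented `mu` argument of `UniPCMultistepScheduler.set_timesteps` only takes effect with dynamic shifting; a sketch of that path follows, where the `mu` value is an arbitrary assumption.

```python
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(
    prediction_type="flow_prediction",
    use_flow_sigmas=True,
    use_dynamic_shifting=True,
    time_shift_type="exponential",
)
# `mu` is only honored when use_dynamic_shifting=True and time_shift_type="exponential".
scheduler.set_timesteps(num_inference_steps=30, mu=1.15)
print(scheduler.sigmas[:5])
```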
diff --git a/src/diffusers/utils/dummy_pt_objects.py b/src/diffusers/utils/dummy_pt_objects.py
index 6be7618fcd..8628893200 100644
--- a/src/diffusers/utils/dummy_pt_objects.py
+++ b/src/diffusers/utils/dummy_pt_objects.py
@@ -257,6 +257,21 @@ class SmoothedEnergyGuidanceConfig(metaclass=DummyObject):
requires_backends(cls, ["torch"])
+class TaylorSeerCacheConfig(metaclass=DummyObject):
+ _backends = ["torch"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch"])
+
+
def apply_faster_cache(*args, **kwargs):
requires_backends(apply_faster_cache, ["torch"])
@@ -273,6 +288,10 @@ def apply_pyramid_attention_broadcast(*args, **kwargs):
requires_backends(apply_pyramid_attention_broadcast, ["torch"])
+def apply_taylorseer_cache(*args, **kwargs):
+ requires_backends(apply_taylorseer_cache, ["torch"])
+
+
class AllegroTransformer3DModel(metaclass=DummyObject):
_backends = ["torch"]
diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py
index b62bfa734e..da64742518 100644
--- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py
+++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py
@@ -227,6 +227,36 @@ class WanModularPipeline(metaclass=DummyObject):
requires_backends(cls, ["torch", "transformers"])
+class ZImageAutoBlocks(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
+class ZImageModularPipeline(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
class AllegroPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
@@ -3752,6 +3782,21 @@ class WuerstchenPriorPipeline(metaclass=DummyObject):
requires_backends(cls, ["torch", "transformers"])
+class ZImageImg2ImgPipeline(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
class ZImagePipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
diff --git a/tests/hooks/test_group_offloading.py b/tests/hooks/test_group_offloading.py
index 96cbecfbf5..236094109d 100644
--- a/tests/hooks/test_group_offloading.py
+++ b/tests/hooks/test_group_offloading.py
@@ -19,6 +19,7 @@ import unittest
import torch
from parameterized import parameterized
+from diffusers import AutoencoderKL
from diffusers.hooks import HookRegistry, ModelHook
from diffusers.models import ModelMixin
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
@@ -149,6 +150,74 @@ class LayerOutputTrackerHook(ModelHook):
return output
+# Model with only standalone computational layers at top level
+class DummyModelWithStandaloneLayers(ModelMixin):
+ def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None:
+ super().__init__()
+
+ self.layer1 = torch.nn.Linear(in_features, hidden_features)
+ self.activation = torch.nn.ReLU()
+ self.layer2 = torch.nn.Linear(hidden_features, hidden_features)
+ self.layer3 = torch.nn.Linear(hidden_features, out_features)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.layer1(x)
+ x = self.activation(x)
+ x = self.layer2(x)
+ x = self.layer3(x)
+ return x
+
+
+# Model with deeply nested structure
+class DummyModelWithDeeplyNestedBlocks(ModelMixin):
+ def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None:
+ super().__init__()
+
+ self.input_layer = torch.nn.Linear(in_features, hidden_features)
+ self.container = ContainerWithNestedModuleList(hidden_features)
+ self.output_layer = torch.nn.Linear(hidden_features, out_features)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.input_layer(x)
+ x = self.container(x)
+ x = self.output_layer(x)
+ return x
+
+
+class ContainerWithNestedModuleList(torch.nn.Module):
+ def __init__(self, features: int) -> None:
+ super().__init__()
+
+ # Top-level computational layer
+ self.proj_in = torch.nn.Linear(features, features)
+
+ # Nested container with ModuleList
+ self.nested_container = NestedContainer(features)
+
+ # Another top-level computational layer
+ self.proj_out = torch.nn.Linear(features, features)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.proj_in(x)
+ x = self.nested_container(x)
+ x = self.proj_out(x)
+ return x
+
+
+class NestedContainer(torch.nn.Module):
+ def __init__(self, features: int) -> None:
+ super().__init__()
+
+ self.blocks = torch.nn.ModuleList([torch.nn.Linear(features, features), torch.nn.Linear(features, features)])
+ self.norm = torch.nn.LayerNorm(features)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ for block in self.blocks:
+ x = block(x)
+ x = self.norm(x)
+ return x
+
+
@require_torch_accelerator
class GroupOffloadTests(unittest.TestCase):
in_features = 64
@@ -340,7 +409,7 @@ class GroupOffloadTests(unittest.TestCase):
out = model(x)
self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match.")
- num_repeats = 4
+ num_repeats = 2
for i in range(num_repeats):
out_ref = model_ref(x)
out = model(x)
@@ -362,3 +431,138 @@ class GroupOffloadTests(unittest.TestCase):
self.assertLess(
cumulated_absmax, 1e-5, f"Output differences for {name} exceeded threshold: {cumulated_absmax:.5f}"
)
+
+ def test_vae_like_model_without_streams(self):
+ """Test VAE-like model with block-level offloading but without streams."""
+ if torch.device(torch_device).type not in ["cuda", "xpu"]:
+ return
+
+ config = self.get_autoencoder_kl_config()
+ model = AutoencoderKL(**config)
+
+ model_ref = AutoencoderKL(**config)
+ model_ref.load_state_dict(model.state_dict(), strict=True)
+ model_ref.to(torch_device)
+
+ model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=False)
+
+ x = torch.randn(2, 3, 32, 32).to(torch_device)
+
+ with torch.no_grad():
+ out_ref = model_ref(x).sample
+ out = model(x).sample
+
+ self.assertTrue(
+ torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match for VAE-like model without streams."
+ )
+
+ def test_model_with_only_standalone_layers(self):
+ """Test that models with only standalone layers (no ModuleList/Sequential) work with block-level offloading."""
+ if torch.device(torch_device).type not in ["cuda", "xpu"]:
+ return
+
+ model = DummyModelWithStandaloneLayers(in_features=64, hidden_features=128, out_features=64)
+
+ model_ref = DummyModelWithStandaloneLayers(in_features=64, hidden_features=128, out_features=64)
+ model_ref.load_state_dict(model.state_dict(), strict=True)
+ model_ref.to(torch_device)
+
+ model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True)
+
+ x = torch.randn(2, 64).to(torch_device)
+
+ with torch.no_grad():
+ for i in range(2):
+ out_ref = model_ref(x)
+ out = model(x)
+ self.assertTrue(
+ torch.allclose(out_ref, out, atol=1e-5),
+ f"Outputs do not match at iteration {i} for model with standalone layers.",
+ )
+
+ @parameterized.expand([("block_level",), ("leaf_level",)])
+ def test_standalone_conv_layers_with_both_offload_types(self, offload_type: str):
+ """Test that standalone Conv2d layers work correctly with both block-level and leaf-level offloading."""
+ if torch.device(torch_device).type not in ["cuda", "xpu"]:
+ return
+
+ config = self.get_autoencoder_kl_config()
+ model = AutoencoderKL(**config)
+
+ model_ref = AutoencoderKL(**config)
+ model_ref.load_state_dict(model.state_dict(), strict=True)
+ model_ref.to(torch_device)
+
+ model.enable_group_offload(torch_device, offload_type=offload_type, num_blocks_per_group=1, use_stream=True)
+
+ x = torch.randn(2, 3, 32, 32).to(torch_device)
+
+ with torch.no_grad():
+ out_ref = model_ref(x).sample
+ out = model(x).sample
+
+ self.assertTrue(
+ torch.allclose(out_ref, out, atol=1e-5),
+ f"Outputs do not match for standalone Conv layers with {offload_type}.",
+ )
+
+ def test_multiple_invocations_with_vae_like_model(self):
+ """Test that multiple forward passes work correctly with VAE-like model."""
+ if torch.device(torch_device).type not in ["cuda", "xpu"]:
+ return
+
+ config = self.get_autoencoder_kl_config()
+ model = AutoencoderKL(**config)
+
+ model_ref = AutoencoderKL(**config)
+ model_ref.load_state_dict(model.state_dict(), strict=True)
+ model_ref.to(torch_device)
+
+ model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True)
+
+ x = torch.randn(2, 3, 32, 32).to(torch_device)
+
+ with torch.no_grad():
+ for i in range(2):
+ out_ref = model_ref(x).sample
+ out = model(x).sample
+ self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), f"Outputs do not match at iteration {i}.")
+
+ def test_nested_container_parameters_offloading(self):
+ """Test that parameters from non-computational layers in nested containers are handled correctly."""
+ if torch.device(torch_device).type not in ["cuda", "xpu"]:
+ return
+
+ model = DummyModelWithDeeplyNestedBlocks(in_features=64, hidden_features=128, out_features=64)
+
+ model_ref = DummyModelWithDeeplyNestedBlocks(in_features=64, hidden_features=128, out_features=64)
+ model_ref.load_state_dict(model.state_dict(), strict=True)
+ model_ref.to(torch_device)
+
+ model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True)
+
+ x = torch.randn(2, 64).to(torch_device)
+
+ with torch.no_grad():
+ for i in range(2):
+ out_ref = model_ref(x)
+ out = model(x)
+ self.assertTrue(
+ torch.allclose(out_ref, out, atol=1e-5),
+ f"Outputs do not match at iteration {i} for nested parameters.",
+ )
+
+ def get_autoencoder_kl_config(self, block_out_channels=None, norm_num_groups=None):
+ block_out_channels = block_out_channels or [2, 4]
+ norm_num_groups = norm_num_groups or 2
+ init_dict = {
+ "block_out_channels": block_out_channels,
+ "in_channels": 3,
+ "out_channels": 3,
+ "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels),
+ "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels),
+ "latent_channels": 4,
+ "norm_num_groups": norm_num_groups,
+ "layers_per_block": 1,
+ }
+ return init_dict
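The new tests above exercise block-level group offloading on a VAE-like model; outside the test suite, the same API can be used roughly as follows. The checkpoint id is an assumption, and the `enable_group_offload` arguments mirror the ones used in the tests.

```python
import torch
from diffusers import AutoencoderKL

# Assumed checkpoint id; any AutoencoderKL behaves the same way with group offloading.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.enable_group_offload(
    torch.device("cuda"), offload_type="block_level", num_blocks_per_group=1, use_stream=True
)

x = torch.randn(1, 3, 64, 64, device="cuda")
with torch.no_grad():
    reconstruction = vae(x).sample  # weights are onloaded/offloaded group by group during the pass
```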
diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py
index ad5a6ba480..b9dfe93233 100644
--- a/tests/models/test_modeling_common.py
+++ b/tests/models/test_modeling_common.py
@@ -1791,7 +1791,6 @@ class ModelTesterMixin:
return model(**inputs_dict)[0]
model = self.model_class(**init_dict)
-
model.to(torch_device)
output_without_group_offloading = run_forward(model)
output_without_group_offloading = normalize_output(output_without_group_offloading)
@@ -1916,6 +1915,9 @@ class ModelTesterMixin:
offload_to_disk_path=tmpdir,
offload_type=offload_type,
num_blocks_per_group=num_blocks_per_group,
+ block_modules=getattr(model, "_group_offload_block_modules", None),
)
if not is_correct:
if extra_files:
diff --git a/tests/modular_pipelines/qwen/test_modular_pipeline_qwenimage.py b/tests/modular_pipelines/qwen/test_modular_pipeline_qwenimage.py
index 8d7600781b..f4bd27b7ea 100644
--- a/tests/modular_pipelines/qwen/test_modular_pipeline_qwenimage.py
+++ b/tests/modular_pipelines/qwen/test_modular_pipeline_qwenimage.py
@@ -26,6 +26,7 @@ from diffusers.modular_pipelines import (
QwenImageModularPipeline,
)
+from ...testing_utils import torch_device
from ..test_modular_pipelines_common import ModularGuiderTesterMixin, ModularPipelineTesterMixin
@@ -104,6 +105,16 @@ class TestQwenImageEditPlusModularPipelineFast(ModularPipelineTesterMixin, Modul
inputs["image"] = PIL.Image.new("RGB", (32, 32), 0)
return inputs
+ def test_multi_images_as_input(self):
+ inputs = self.get_dummy_inputs()
+ image = inputs.pop("image")
+ inputs["image"] = [image, image]
+
+ pipe = self.get_pipeline().to(torch_device)
+ _ = pipe(**inputs)
+
@pytest.mark.xfail(condition=True, reason="Batch of multiple images needs to be revisited", strict=True)
def test_num_images_per_prompt(self):
super().test_num_images_per_prompt()
@@ -117,4 +128,4 @@ class TestQwenImageEditPlusModularPipelineFast(ModularPipelineTesterMixin, Modul
super().test_inference_batch_single_identical()
def test_guider_cfg(self):
- super().test_guider_cfg(1e-3)
+ super().test_guider_cfg(1e-6)
diff --git a/tests/pipelines/flux/test_pipeline_flux.py b/tests/pipelines/flux/test_pipeline_flux.py
index 1ddbd4ba3d..74499bfa60 100644
--- a/tests/pipelines/flux/test_pipeline_flux.py
+++ b/tests/pipelines/flux/test_pipeline_flux.py
@@ -29,6 +29,7 @@ from ..test_pipelines_common import (
FluxIPAdapterTesterMixin,
PipelineTesterMixin,
PyramidAttentionBroadcastTesterMixin,
+ TaylorSeerCacheTesterMixin,
check_qkv_fused_layers_exist,
)
@@ -39,6 +40,7 @@ class FluxPipelineFastTests(
PyramidAttentionBroadcastTesterMixin,
FasterCacheTesterMixin,
FirstBlockCacheTesterMixin,
+ TaylorSeerCacheTesterMixin,
unittest.TestCase,
):
pipeline_class = FluxPipeline
diff --git a/tests/pipelines/hunyuan_video/test_hunyuan_video.py b/tests/pipelines/hunyuan_video/test_hunyuan_video.py
index 4bdf3ee20e..57a6daebad 100644
--- a/tests/pipelines/hunyuan_video/test_hunyuan_video.py
+++ b/tests/pipelines/hunyuan_video/test_hunyuan_video.py
@@ -33,6 +33,7 @@ from ..test_pipelines_common import (
FirstBlockCacheTesterMixin,
PipelineTesterMixin,
PyramidAttentionBroadcastTesterMixin,
+ TaylorSeerCacheTesterMixin,
to_np,
)
@@ -45,6 +46,7 @@ class HunyuanVideoPipelineFastTests(
PyramidAttentionBroadcastTesterMixin,
FasterCacheTesterMixin,
FirstBlockCacheTesterMixin,
+ TaylorSeerCacheTesterMixin,
unittest.TestCase,
):
pipeline_class = HunyuanVideoPipeline
diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index 22570b2884..7db5f4da89 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -36,6 +36,7 @@ from diffusers.hooks import apply_group_offloading
from diffusers.hooks.faster_cache import FasterCacheBlockHook, FasterCacheDenoiserHook
from diffusers.hooks.first_block_cache import FirstBlockCacheConfig
from diffusers.hooks.pyramid_attention_broadcast import PyramidAttentionBroadcastHook
+from diffusers.hooks.taylorseer_cache import TaylorSeerCacheConfig
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FluxIPAdapterMixin, IPAdapterMixin
from diffusers.models.attention import AttentionModuleMixin
@@ -2924,6 +2925,57 @@ class FirstBlockCacheTesterMixin:
)
+class TaylorSeerCacheTesterMixin:
+ taylorseer_cache_config = TaylorSeerCacheConfig(
+ cache_interval=5,
+ disable_cache_before_step=10,
+ max_order=1,
+ taylor_factors_dtype=torch.bfloat16,
+ use_lite_mode=True,
+ )
+
+ def test_taylorseer_cache_inference(self, expected_atol: float = 0.1):
+ device = "cpu" # ensure determinism for the device-dependent torch.Generator
+
+ def create_pipe():
+ torch.manual_seed(0)
+ num_layers = 2
+ components = self.get_dummy_components(num_layers=num_layers)
+ pipe = self.pipeline_class(**components)
+ pipe = pipe.to(device)
+ pipe.set_progress_bar_config(disable=None)
+ return pipe
+
+ def run_forward(pipe):
+ torch.manual_seed(0)
+ inputs = self.get_dummy_inputs(device)
+ inputs["num_inference_steps"] = 50
+ return pipe(**inputs)[0]
+
+ # Run inference without TaylorSeerCache
+ pipe = create_pipe()
+ output = run_forward(pipe).flatten()
+ original_image_slice = np.concatenate((output[:8], output[-8:]))
+
+ # Run inference with TaylorSeerCache enabled
+ pipe = create_pipe()
+ pipe.transformer.enable_cache(self.taylorseer_cache_config)
+ output = run_forward(pipe).flatten()
+ image_slice_cache_enabled = np.concatenate((output[:8], output[-8:]))
+
+ # Run inference with TaylorSeerCache disabled
+ pipe.transformer.disable_cache()
+ output = run_forward(pipe).flatten()
+ image_slice_cache_disabled = np.concatenate((output[:8], output[-8:]))
+
+ assert np.allclose(original_image_slice, image_slice_cache_enabled, atol=expected_atol), (
+ "Outputs with TaylorSeerCache enabled should not differ much from the baseline."
+ )
+ assert np.allclose(original_image_slice, image_slice_cache_disabled, atol=1e-4), (
+ "Outputs from normal inference and after disabling the cache should not differ."
+ )
+
+
# Some models (e.g. unCLIP) are extremely likely to significantly deviate depending on which hardware is used.
# This helper function is used to check that the image doesn't deviate on average more than 10 pixels from a
# reference image.
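Outside the tester mixin, the cache added in this PR is enabled in user code along these lines. The Flux checkpoint and prompt are assumptions for illustration, while the config fields mirror the ones used in `TaylorSeerCacheTesterMixin` above.

```python
import torch
from diffusers import FluxPipeline, TaylorSeerCacheConfig

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cuda")

# Same knobs as the tester mixin; tune cache_interval/disable_cache_before_step per model.
config = TaylorSeerCacheConfig(
    cache_interval=5,
    disable_cache_before_step=10,
    max_order=1,
    taylor_factors_dtype=torch.bfloat16,
)
pipe.transformer.enable_cache(config)

image = pipe("a quiet harbor at dusk", num_inference_steps=28).images[0]
pipe.transformer.disable_cache()  # restores the uncached forward pass
```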
diff --git a/tests/pipelines/z_image/test_z_image_img2img.py b/tests/pipelines/z_image/test_z_image_img2img.py
new file mode 100644
index 0000000000..91b3025b17
--- /dev/null
+++ b/tests/pipelines/z_image/test_z_image_img2img.py
@@ -0,0 +1,358 @@
+# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gc
+import inspect
+import os
+import random
+import unittest
+
+import numpy as np
+import torch
+from transformers import Qwen2Tokenizer, Qwen3Config, Qwen3Model
+
+from diffusers import (
+ AutoencoderKL,
+ FlowMatchEulerDiscreteScheduler,
+ ZImageImg2ImgPipeline,
+ ZImageTransformer2DModel,
+)
+from diffusers.utils.testing_utils import floats_tensor
+
+from ...testing_utils import torch_device
+from ..pipeline_params import (
+ IMAGE_TO_IMAGE_IMAGE_PARAMS,
+ TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
+ TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
+)
+from ..test_pipelines_common import PipelineTesterMixin, to_np
+
+
+# Z-Image requires torch.use_deterministic_algorithms(False) because of its complex64 RoPE operations,
+# so enable_full_determinism() (which sets the flag to True) cannot be used here.
+# Note: Z-Image does not support FP16 inference due to the complex64 RoPE embeddings.
+os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
+os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
+torch.use_deterministic_algorithms(False)
+torch.backends.cudnn.deterministic = True
+torch.backends.cudnn.benchmark = False
+if hasattr(torch.backends, "cuda"):
+ torch.backends.cuda.matmul.allow_tf32 = False
+
+
+class ZImageImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = ZImageImg2ImgPipeline
+ params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"cross_attention_kwargs"}
+ batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
+ image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
+ image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
+ required_optional_params = frozenset(
+ [
+ "num_inference_steps",
+ "strength",
+ "generator",
+ "latents",
+ "return_dict",
+ "callback_on_step_end",
+ "callback_on_step_end_tensor_inputs",
+ ]
+ )
+ supports_dduf = False
+ test_xformers_attention = False
+ test_layerwise_casting = True
+ test_group_offloading = True
+
+ def setUp(self):
+ gc.collect()
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ torch.cuda.synchronize()
+ torch.manual_seed(0)
+ if torch.cuda.is_available():
+ torch.cuda.manual_seed_all(0)
+
+ def tearDown(self):
+ super().tearDown()
+ gc.collect()
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ torch.cuda.synchronize()
+ torch.manual_seed(0)
+ if torch.cuda.is_available():
+ torch.cuda.manual_seed_all(0)
+
+ def get_dummy_components(self):
+ torch.manual_seed(0)
+ transformer = ZImageTransformer2DModel(
+ all_patch_size=(2,),
+ all_f_patch_size=(1,),
+ in_channels=16,
+ dim=32,
+ n_layers=2,
+ n_refiner_layers=1,
+ n_heads=2,
+ n_kv_heads=2,
+ norm_eps=1e-5,
+ qk_norm=True,
+ cap_feat_dim=16,
+ rope_theta=256.0,
+ t_scale=1000.0,
+ axes_dims=[8, 4, 4],
+ axes_lens=[256, 32, 32],
+ )
+
+ torch.manual_seed(0)
+ vae = AutoencoderKL(
+ in_channels=3,
+ out_channels=3,
+ down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
+ up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
+ block_out_channels=[32, 64],
+ layers_per_block=1,
+ latent_channels=16,
+ norm_num_groups=32,
+ sample_size=32,
+ scaling_factor=0.3611,
+ shift_factor=0.1159,
+ )
+
+ torch.manual_seed(0)
+ scheduler = FlowMatchEulerDiscreteScheduler()
+
+ torch.manual_seed(0)
+ config = Qwen3Config(
+ hidden_size=16,
+ intermediate_size=16,
+ num_hidden_layers=2,
+ num_attention_heads=2,
+ num_key_value_heads=2,
+ vocab_size=151936,
+ max_position_embeddings=512,
+ )
+ text_encoder = Qwen3Model(config)
+ tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration")
+
+ components = {
+ "transformer": transformer,
+ "vae": vae,
+ "scheduler": scheduler,
+ "text_encoder": text_encoder,
+ "tokenizer": tokenizer,
+ }
+ return components
+
+ def get_dummy_inputs(self, device, seed=0):
+ if str(device).startswith("mps"):
+ generator = torch.manual_seed(seed)
+ else:
+ generator = torch.Generator(device=device).manual_seed(seed)
+
+ image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
+
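+        # `strength` controls how much noise is applied to the encoded input image; higher values deviate more from it.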
+ inputs = {
+ "prompt": "dance monkey",
+ "negative_prompt": "bad quality",
+ "image": image,
+ "strength": 0.6,
+ "generator": generator,
+ "num_inference_steps": 2,
+ "guidance_scale": 3.0,
+ "cfg_normalization": False,
+ "cfg_truncation": 1.0,
+ "height": 32,
+ "width": 32,
+ "max_sequence_length": 16,
+ "output_type": "np",
+ }
+
+ return inputs
+
+ def test_inference(self):
+ device = "cpu"
+
+ components = self.get_dummy_components()
+ pipe = self.pipeline_class(**components)
+ pipe.to(device)
+ pipe.set_progress_bar_config(disable=None)
+
+ inputs = self.get_dummy_inputs(device)
+ image = pipe(**inputs).images
+ generated_image = image[0]
+ self.assertEqual(generated_image.shape, (32, 32, 3))
+
+ def test_inference_batch_single_identical(self):
+ gc.collect()
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ torch.cuda.synchronize()
+ torch.manual_seed(0)
+ if torch.cuda.is_available():
+ torch.cuda.manual_seed_all(0)
+ self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1)
+
+ def test_num_images_per_prompt(self):
+ sig = inspect.signature(self.pipeline_class.__call__)
+
+ if "num_images_per_prompt" not in sig.parameters:
+ return
+
+ components = self.get_dummy_components()
+ pipe = self.pipeline_class(**components)
+ pipe = pipe.to(torch_device)
+ pipe.set_progress_bar_config(disable=None)
+
+ batch_sizes = [1, 2]
+ num_images_per_prompts = [1, 2]
+
+ for batch_size in batch_sizes:
+ for num_images_per_prompt in num_images_per_prompts:
+ inputs = self.get_dummy_inputs(torch_device)
+
+ for key in inputs.keys():
+ if key in self.batch_params:
+ inputs[key] = batch_size * [inputs[key]]
+
+ images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
+
+ assert images.shape[0] == batch_size * num_images_per_prompt
+
+ del pipe
+ gc.collect()
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ torch.cuda.synchronize()
+
+ def test_attention_slicing_forward_pass(
+ self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3
+ ):
+ if not self.test_attention_slicing:
+ return
+
+ components = self.get_dummy_components()
+ pipe = self.pipeline_class(**components)
+ for component in pipe.components.values():
+ if hasattr(component, "set_default_attn_processor"):
+ component.set_default_attn_processor()
+ pipe.to(torch_device)
+ pipe.set_progress_bar_config(disable=None)
+
+ generator_device = "cpu"
+ inputs = self.get_dummy_inputs(generator_device)
+ output_without_slicing = pipe(**inputs)[0]
+
+ pipe.enable_attention_slicing(slice_size=1)
+ inputs = self.get_dummy_inputs(generator_device)
+ output_with_slicing1 = pipe(**inputs)[0]
+
+ pipe.enable_attention_slicing(slice_size=2)
+ inputs = self.get_dummy_inputs(generator_device)
+ output_with_slicing2 = pipe(**inputs)[0]
+
+ if test_max_difference:
+ max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max()
+ max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max()
+ self.assertLess(
+ max(max_diff1, max_diff2),
+ expected_max_diff,
+ "Attention slicing should not affect the inference results",
+ )
+
+ def test_vae_tiling(self, expected_diff_max: float = 0.3):
+ generator_device = "cpu"
+ components = self.get_dummy_components()
+
+ pipe = self.pipeline_class(**components)
+ pipe.to("cpu")
+ pipe.set_progress_bar_config(disable=None)
+
+ # Without tiling
+ inputs = self.get_dummy_inputs(generator_device)
+ inputs["height"] = inputs["width"] = 128
+ # Generate a larger image for the input
+ inputs["image"] = floats_tensor((1, 3, 128, 128), rng=random.Random(0)).to("cpu")
+ output_without_tiling = pipe(**inputs)[0]
+
+        # With tiling (the standard AutoencoderKL's enable_tiling() takes no arguments)
+ pipe.vae.enable_tiling()
+ inputs = self.get_dummy_inputs(generator_device)
+ inputs["height"] = inputs["width"] = 128
+ inputs["image"] = floats_tensor((1, 3, 128, 128), rng=random.Random(0)).to("cpu")
+ output_with_tiling = pipe(**inputs)[0]
+
+ self.assertLess(
+ (to_np(output_without_tiling) - to_np(output_with_tiling)).max(),
+ expected_diff_max,
+ "VAE tiling should not affect the inference results",
+ )
+
+ def test_pipeline_with_accelerator_device_map(self, expected_max_difference=5e-4):
+        # Z-Image's complex64 RoPE embeddings require a slightly higher numerical tolerance
+ super().test_pipeline_with_accelerator_device_map(expected_max_difference=expected_max_difference)
+
+ def test_group_offloading_inference(self):
+        # Block-level offloading conflicts with the RoPE cache; pipeline-level offloading (tested separately) works fine.
+ self.skipTest("Using test_pipeline_level_group_offloading_inference instead")
+
+ def test_save_load_float16(self, expected_max_diff=1e-2):
+ # Z-Image does not support FP16 due to complex64 RoPE embeddings
+ self.skipTest("Z-Image does not support FP16 inference")
+
+ def test_float16_inference(self, expected_max_diff=5e-2):
+ # Z-Image does not support FP16 due to complex64 RoPE embeddings
+ self.skipTest("Z-Image does not support FP16 inference")
+
+ def test_strength_parameter(self):
+ """Test that strength parameter affects the output correctly."""
+ device = "cpu"
+ components = self.get_dummy_components()
+ pipe = self.pipeline_class(**components)
+ pipe.to(device)
+ pipe.set_progress_bar_config(disable=None)
+
+ # Test with different strength values
+ inputs_low_strength = self.get_dummy_inputs(device)
+ inputs_low_strength["strength"] = 0.2
+
+ inputs_high_strength = self.get_dummy_inputs(device)
+ inputs_high_strength["strength"] = 0.8
+
+ # Both should complete without errors
+ output_low = pipe(**inputs_low_strength).images[0]
+ output_high = pipe(**inputs_high_strength).images[0]
+
+ # Outputs should be different (different amount of transformation)
+ self.assertFalse(np.allclose(output_low, output_high, atol=1e-3))
+
+ def test_invalid_strength(self):
+ """Test that invalid strength values raise appropriate errors."""
+ device = "cpu"
+ components = self.get_dummy_components()
+ pipe = self.pipeline_class(**components)
+ pipe.to(device)
+
+ inputs = self.get_dummy_inputs(device)
+
+ # Test strength < 0
+ inputs["strength"] = -0.1
+ with self.assertRaises(ValueError):
+ pipe(**inputs)
+
+ # Test strength > 1
+ inputs["strength"] = 1.5
+ with self.assertRaises(ValueError):
+ pipe(**inputs)
diff --git a/tests/testing_utils.py b/tests/testing_utils.py
index 6ed7e3467d..4550813259 100644
--- a/tests/testing_utils.py
+++ b/tests/testing_utils.py
@@ -1424,6 +1424,8 @@ if is_torch_available():
offload_to_disk_path: str,
offload_type: str,
num_blocks_per_group: Optional[int] = None,
+ block_modules: Optional[List[str]] = None,
+ module_prefix: str = "",
) -> Set[str]:
expected_files = set()
@@ -1435,23 +1437,36 @@ if is_torch_available():
if num_blocks_per_group is None:
raise ValueError("num_blocks_per_group must be provided for 'block_level' offloading.")
- # Handle groups of ModuleList and Sequential blocks
+ block_modules_set = set(block_modules) if block_modules is not None else set()
+
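+        # Track which children receive group offloading so leftover parameters/buffers can be grouped below.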
+ modules_with_group_offloading = set()
unmatched_modules = []
for name, submodule in module.named_children():
- if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)):
- unmatched_modules.append(module)
- continue
+ if name in block_modules_set:
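+                # Recurse into explicitly listed block modules, prefixing their group ids with the parent module name.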
+ new_prefix = f"{module_prefix}{name}." if module_prefix else f"{name}."
+ submodule_files = _get_expected_safetensors_files(
+ submodule, offload_to_disk_path, offload_type, num_blocks_per_group, block_modules, new_prefix
+ )
+ expected_files.update(submodule_files)
+ modules_with_group_offloading.add(name)
- for i in range(0, len(submodule), num_blocks_per_group):
- current_modules = submodule[i : i + num_blocks_per_group]
- if not current_modules:
- continue
- group_id = f"{name}_{i}_{i + len(current_modules) - 1}"
- expected_files.add(get_hashed_filename(group_id))
+ elif isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)):
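+                # Chunk ModuleList/Sequential children into groups of `num_blocks_per_group`, one safetensors file per group.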
+ for i in range(0, len(submodule), num_blocks_per_group):
+ current_modules = submodule[i : i + num_blocks_per_group]
+ if not current_modules:
+ continue
+ group_id = f"{module_prefix}{name}_{i}_{i + len(current_modules) - 1}"
+ expected_files.add(get_hashed_filename(group_id))
+ for j in range(i, i + len(current_modules)):
+ modules_with_group_offloading.add(f"{name}.{j}")
+ else:
+ unmatched_modules.append(submodule)
- # Handle the group for unmatched top-level modules and parameters
- for module in unmatched_modules:
- expected_files.add(get_hashed_filename(f"{module.__class__.__name__}_unmatched_group"))
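+            # Any unmatched top-level modules, parameters, or buffers share a single "unmatched_group" file.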
+ parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading)
+ buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading)
+
+ if len(unmatched_modules) > 0 or len(parameters) > 0 or len(buffers) > 0:
+ expected_files.add(get_hashed_filename(f"{module_prefix}{module.__class__.__name__}_unmatched_group"))
elif offload_type == "leaf_level":
# Handle leaf-level module groups
@@ -1492,12 +1507,13 @@ if is_torch_available():
offload_to_disk_path: str,
offload_type: str,
num_blocks_per_group: Optional[int] = None,
+ block_modules: Optional[List[str]] = None,
) -> bool:
if not os.path.isdir(offload_to_disk_path):
return False, None, None
expected_files = _get_expected_safetensors_files(
- module, offload_to_disk_path, offload_type, num_blocks_per_group
+ module, offload_to_disk_path, offload_type, num_blocks_per_group, block_modules
)
actual_files = set(glob.glob(os.path.join(offload_to_disk_path, "*.safetensors")))
missing_files = expected_files - actual_files