
Apply style fixes

commit 13eca6ef2d
parent 932914f45d
Author: github-actions[bot]
Date: 2025-06-24 08:20:04 +00:00

@@ -1407,22 +1407,18 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
     def compile_repeated_blocks(self, *args, **kwargs):
         """
-        Compiles *only* the frequently repeated sub-modules of a model (e.g. the
-        Transformer layers) instead of compiling the entire model. This
-        technique—often called **regional compilation** (see the PyTorch recipe
-        https://docs.pytorch.org/tutorials/recipes/regional_compilation.html)
-        can reduce end-to-end compile time substantially, while preserving the
-        runtime speed-ups you would expect from a full `torch.compile`.
+        Compiles *only* the frequently repeated sub-modules of a model (e.g. the Transformer layers) instead of
+        compiling the entire model. This technique—often called **regional compilation** (see the PyTorch recipe
+        https://docs.pytorch.org/tutorials/recipes/regional_compilation.html) can reduce end-to-end compile time
+        substantially, while preserving the runtime speed-ups you would expect from a full `torch.compile`.
 
-        The set of sub-modules to compile is discovered by the presence of
-        **`_repeated_blocks`** attribute in the model definition. Define this
-        attribute on your model subclass as a list/tuple of class names
-        (strings). Every module whose class name matches will be compiled.
+        The set of sub-modules to compile is discovered by the presence of **`_repeated_blocks`** attribute in the
+        model definition. Define this attribute on your model subclass as a list/tuple of class names (strings). Every
+        module whose class name matches will be compiled.
 
-        Once discovered, each matching sub-module is compiled by calling
-        `submodule.compile(*args, **kwargs)`. Any positional or keyword
-        arguments you supply to `compile_repeated_blocks` are forwarded
-        verbatim to `torch.compile`.
+        Once discovered, each matching sub-module is compiled by calling `submodule.compile(*args, **kwargs)`. Any
+        positional or keyword arguments you supply to `compile_repeated_blocks` are forwarded verbatim to
+        `torch.compile`.
         """
         repeated_blocks = getattr(self, "_repeated_blocks", None)
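
For context, here is a minimal usage sketch of the API this hunk documents. It is a sketch under assumptions: `ToyBlock` and `ToyModel` are hypothetical stand-ins, and `mode="reduce-overhead"` is just one example of a kwarg forwarded to `torch.compile`; only `ModelMixin`, `ConfigMixin`, `register_to_config`, and `compile_repeated_blocks` itself come from diffusers.

```python
import torch

from diffusers import ConfigMixin, ModelMixin
from diffusers.configuration_utils import register_to_config


class ToyBlock(torch.nn.Module):
    """Hypothetical sub-module that the model repeats several times."""

    def __init__(self, dim: int):
        super().__init__()
        self.proj = torch.nn.Linear(dim, dim)

    def forward(self, x):
        return torch.relu(self.proj(x))


class ToyModel(ModelMixin, ConfigMixin):
    # Class names listed here mark which sub-modules get compiled.
    _repeated_blocks = ("ToyBlock",)

    @register_to_config
    def __init__(self, dim: int = 64, depth: int = 4):
        super().__init__()
        self.blocks = torch.nn.ModuleList([ToyBlock(dim) for _ in range(depth)])

    def forward(self, x):
        for block in self.blocks:
            x = block(x)
        return x


model = ToyModel()
# Compiles only the four ToyBlock instances (regional compilation);
# the kwarg is forwarded verbatim to torch.compile on each match.
model.compile_repeated_blocks(mode="reduce-overhead")
```

On a real diffusers model the call looks the same, e.g. `pipe.transformer.compile_repeated_blocks()` for a transformer whose class already defines `_repeated_blocks`.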