
Revert "path change for StableDiffusionLoraLoaderMixin"

This reverts commit 6d881198f3.
Author: sayakpaul
Date: 2025-12-08 13:56:18 +05:30
Parent: 6d881198f3
Commit: b4432fffab

2 changed files with 120 additions and 169 deletions

File 1 of 2

@@ -154,8 +154,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -367,7 +366,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin):
Speed up model loading only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
@@ -435,7 +434,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
@@ -470,8 +469,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin):
text_encoder_2_lora_adapter_metadata=None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -512,7 +510,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -524,7 +522,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin):
def unfuse_lora(self, components: List[str] = ["unet", "text_encoder", "text_encoder_2"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -550,7 +548,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -604,8 +602,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -674,8 +671,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -734,7 +730,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
@@ -770,8 +766,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
text_encoder_2_lora_adapter_metadata=None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -813,7 +808,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -826,7 +821,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.StableDiffusionXLLoraLoaderMixin.unfuse_lora with unet->transformer
def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder", "text_encoder_2"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -848,7 +843,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -903,8 +898,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -950,8 +944,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -983,8 +976,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -1016,7 +1008,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -1029,7 +1021,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -1056,7 +1048,7 @@ class FluxLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -1185,15 +1177,15 @@ class FluxLoraLoaderMixin(LoraBaseMixin):
All kwargs are forwarded to `self.lora_state_dict`.
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details
-on how the state dict is loaded.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is
+loaded.
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for
-more details on how the state dict is loaded into `self.transformer`.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
+dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
@@ -1201,9 +1193,9 @@ class FluxLoraLoaderMixin(LoraBaseMixin):
`Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -1310,8 +1302,7 @@ class FluxLoraLoaderMixin(LoraBaseMixin):
hotswap: bool = False,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"):
raise ValueError(
@@ -1424,7 +1415,7 @@ class FluxLoraLoaderMixin(LoraBaseMixin):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
@@ -1516,7 +1507,7 @@ class FluxLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
transformer = getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer
@@ -1855,8 +1846,7 @@ class AmusedLoraLoaderMixin(StableDiffusionLoraLoaderMixin):
hotswap: bool = False,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"):
raise ValueError(
@@ -1915,7 +1905,7 @@ class AmusedLoraLoaderMixin(StableDiffusionLoraLoaderMixin):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
@@ -2006,7 +1996,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -2060,8 +2050,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -2107,8 +2096,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -2139,8 +2127,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -2171,7 +2158,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -2183,7 +2170,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -2205,7 +2192,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -2260,8 +2247,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -2307,8 +2293,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -2340,8 +2325,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -2373,7 +2357,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -2386,7 +2370,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -2407,7 +2391,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -2466,8 +2450,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -2513,8 +2496,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -2546,8 +2528,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -2579,7 +2560,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -2592,7 +2573,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -2614,7 +2595,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -2669,8 +2650,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -2716,8 +2696,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -2749,8 +2728,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -2782,7 +2760,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -2795,7 +2773,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -2816,7 +2794,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -2875,8 +2853,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -2922,8 +2899,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -2955,8 +2931,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -2988,7 +2963,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -3001,7 +2976,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -3022,7 +2997,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -3082,8 +3057,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -3129,8 +3103,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -3162,8 +3135,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -3195,7 +3167,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -3208,7 +3180,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -3514,7 +3486,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -3620,8 +3592,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -3691,8 +3662,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -3724,8 +3694,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -3757,7 +3726,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -3770,7 +3739,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -3792,7 +3761,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -3900,8 +3869,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -3971,8 +3939,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -4004,8 +3971,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -4037,7 +4003,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -4050,7 +4016,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -4072,7 +4038,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -4127,8 +4093,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -4174,8 +4139,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -4207,8 +4171,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -4240,7 +4203,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -4253,7 +4216,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -4274,7 +4237,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -4333,8 +4296,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -4380,8 +4342,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -4413,8 +4374,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -4446,7 +4406,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -4459,7 +4419,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -4480,7 +4440,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -4542,8 +4502,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -4589,8 +4548,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -4622,8 +4580,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -4655,7 +4612,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -4668,7 +4625,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -4689,7 +4646,7 @@ class ZImageLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -4751,8 +4708,7 @@ class ZImageLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -4798,8 +4754,7 @@ class ZImageLoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -4831,8 +4786,7 @@ class ZImageLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -4864,7 +4818,7 @@ class ZImageLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -4877,7 +4831,7 @@ class ZImageLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)
@@ -4898,7 +4852,7 @@ class Flux2LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
"""
# Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both.
@@ -4957,8 +4911,7 @@ class Flux2LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -5004,8 +4957,7 @@ class Flux2LoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
"""
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError(
@@ -5037,8 +4989,7 @@ class Flux2LoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more
-information.
+See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
"""
lora_layers = {}
lora_metadata = {}
@@ -5070,7 +5021,7 @@ class Flux2LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
"""
super().fuse_lora(
components=components,
@@ -5083,7 +5034,7 @@ class Flux2LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r"""
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
+See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
"""
super().unfuse_lora(components=components, **kwargs)

File 2 of 2

@@ -59,18 +59,18 @@ class StableDiffusionLoraLoaderMixin(LoraBaseMixin):
All kwargs are forwarded to `self.lora_state_dict`.
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details
-on how the state dict is loaded.
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is
+loaded.
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more
-details on how the state dict is loaded into `self.unet`.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is
+loaded into `self.unet`.
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for
-more details on how the state dict is loaded into `self.text_encoder`.
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state
+dict is loaded into `self.text_encoder`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
@@ -101,7 +101,7 @@ class StableDiffusionLoraLoaderMixin(LoraBaseMixin):
limitations to this technique, which are documented here:
https://huggingface.co/docs/peft/main/en/package_reference/hotswap
kwargs (`dict`, *optional*):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.lora_state_dict`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
@@ -292,7 +292,7 @@ class StableDiffusionLoraLoaderMixin(LoraBaseMixin):
Speed up model loading only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
@@ -359,7 +359,7 @@ class StableDiffusionLoraLoaderMixin(LoraBaseMixin):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
-See [`~pipelines.stable_diffusion.lora_utils.StableDiffusionLoraLoaderMixin.load_lora_weights`].
+See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
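
All of the cross-references in this diff point at the same handful of LoRA loader entry points. For orientation, a minimal sketch of how those methods are typically exercised through a pipeline; the model and adapter IDs below are illustrative placeholders, not taken from this commit:

import torch
from diffusers import StableDiffusionPipeline

# Load a base pipeline; any Stable Diffusion checkpoint works here.
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# load_lora_weights loads a LoRA state dict into the UNet and text encoder;
# the adapter repo below is a hypothetical placeholder.
pipe.load_lora_weights("some-user/some-style-lora", adapter_name="style")

# fuse_lora merges the adapter into the base weights so inference runs
# without the extra LoRA layers; unfuse_lora reverses the merge.
pipe.fuse_lora(lora_scale=0.8)
image = pipe("an astronaut riding a horse").images[0]
pipe.unfuse_lora()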