From 84cd9e8d01adb47f046b1ee449fc76a0c32dc4e2 Mon Sep 17 00:00:00 2001
From: Sayak Paul
Date: Tue, 7 Nov 2023 22:08:12 +0530
Subject: [PATCH] Make sure DDPM and `diffusers` can be used without
 Transformers (#5668)

* fix: import bug

* fix

* fix

* fix import utils for lcm

* fix: pixart alpha init

* Fix

---------

Co-authored-by: Patrick von Platen
---
 src/diffusers/loaders.py                      |  6 +--
 .../latent_consistency_models/__init__.py     | 38 +++++++++++---
 .../pipelines/pixart_alpha/__init__.py        | 49 ++++++++++++++++++-
 3 files changed, 82 insertions(+), 11 deletions(-)

diff --git a/src/diffusers/loaders.py b/src/diffusers/loaders.py
index 87e0e16402..2fa1c61fd8 100644
--- a/src/diffusers/loaders.py
+++ b/src/diffusers/loaders.py
@@ -2390,7 +2390,7 @@ class LoraLoaderMixin:
     def set_adapters_for_text_encoder(
         self,
         adapter_names: Union[List[str], str],
-        text_encoder: Optional[PreTrainedModel] = None,
+        text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
         text_encoder_weights: List[float] = None,
     ):
         """
@@ -2429,7 +2429,7 @@ class LoraLoaderMixin:
         )
         set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights)

-    def disable_lora_for_text_encoder(self, text_encoder: Optional[PreTrainedModel] = None):
+    def disable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None):
         """
         Disables the LoRA layers for the text encoder.

@@ -2446,7 +2446,7 @@ class LoraLoaderMixin:
             raise ValueError("Text Encoder not found.")
         set_adapter_layers(text_encoder, enabled=False)

-    def enable_lora_for_text_encoder(self, text_encoder: Optional[PreTrainedModel] = None):
+    def enable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None):
         """
         Enables the LoRA layers for the text encoder.

diff --git a/src/diffusers/pipelines/latent_consistency_models/__init__.py b/src/diffusers/pipelines/latent_consistency_models/__init__.py
index 14002058cd..8f79d3c477 100644
--- a/src/diffusers/pipelines/latent_consistency_models/__init__.py
+++ b/src/diffusers/pipelines/latent_consistency_models/__init__.py
@@ -1,19 +1,40 @@
 from typing import TYPE_CHECKING

 from ...utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
     _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
 )


-_import_structure = {
-    "pipeline_latent_consistency_img2img": ["LatentConsistencyModelImg2ImgPipeline"],
-    "pipeline_latent_consistency_text2img": ["LatentConsistencyModelPipeline"],
-}
+_dummy_objects = {}
+_import_structure = {}

-if TYPE_CHECKING:
-    from .pipeline_latent_consistency_img2img import LatentConsistencyModelImg2ImgPipeline
-    from .pipeline_latent_consistency_text2img import LatentConsistencyModelPipeline
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["pipeline_latent_consistency_img2img"] = ["LatentConsistencyModelImg2ImgPipeline"]
+    _import_structure["pipeline_latent_consistency_text2img"] = ["LatentConsistencyModelPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_latent_consistency_img2img import LatentConsistencyModelImg2ImgPipeline
+        from .pipeline_latent_consistency_text2img import LatentConsistencyModelPipeline

 else:
     import sys
@@ -24,3 +45,6 @@ else:
         _import_structure,
         module_spec=__spec__,
     )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
diff --git a/src/diffusers/pipelines/pixart_alpha/__init__.py b/src/diffusers/pipelines/pixart_alpha/__init__.py
index e0d238907a..0bfa28fcde 100644
--- a/src/diffusers/pipelines/pixart_alpha/__init__.py
+++ b/src/diffusers/pipelines/pixart_alpha/__init__.py
@@ -1 +1,48 @@
-from .pipeline_pixart_alpha import PixArtAlphaPipeline
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["pipeline_pixart_alpha"] = ["PixArtAlphaPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_pixart_alpha import PixArtAlphaPipeline
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
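Note: the two `__init__.py` changes above both apply the same guarded-import pattern: probe for the optional backend at import time, register real pipeline classes only when it is present, and otherwise fall back to dummy placeholders so `import diffusers` never fails. Below is a minimal, self-contained sketch of that pattern. It is an illustration only, not diffusers code: `is_transformers_available`, `OptionalDependencyNotAvailable`, `ExamplePipeline`, and `_DummyExamplePipeline` are reimplemented or hypothetical names used to mimic the structure of the patched modules.

# Sketch of the optional-dependency guard used in the patched __init__.py files.
# Hypothetical names; only the control flow mirrors the patch.
import importlib.util


def is_transformers_available() -> bool:
    # Stand-in for diffusers.utils.is_transformers_available(): a spec lookup
    # that does not actually import the package.
    return importlib.util.find_spec("transformers") is not None


class OptionalDependencyNotAvailable(Exception):
    """Raised when an optional backend (here, transformers) is missing."""


class _DummyExamplePipeline:
    # Placeholder registered instead of the real class, so importing the
    # package succeeds and only *using* the pipeline raises an error.
    def __init__(self, *args, **kwargs):
        raise OptionalDependencyNotAvailable(
            "ExamplePipeline requires the `transformers` library to be installed."
        )


try:
    if not is_transformers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to the dummy object, mirroring _dummy_objects in the patch.
    ExamplePipeline = _DummyExamplePipeline
else:
    class ExamplePipeline:
        # The real, transformers-backed implementation would live here.
        def __call__(self, prompt: str) -> str:
            return f"generated output for: {prompt}"


if __name__ == "__main__":
    # The import path always works; instantiation fails only when the
    # optional backend is absent.
    print(type(ExamplePipeline))

A consumer can therefore import the package unconditionally and gets a clear, deferred error message instead of an ImportError at module load time, which is the behavior the patch restores for installations without Transformers.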