From bc3c73ad0b75ee550fdcce6e124d5a222834d6ed Mon Sep 17 00:00:00 2001 From: YiYi Xu Date: Mon, 5 Aug 2024 08:04:51 -1000 Subject: [PATCH] add sentencepiece as a soft dependency (#9065) * add sentencepiece as soft dependency for kolors * up --------- Co-authored-by: Sayak Paul --- src/diffusers/__init__.py | 27 +++++++-- src/diffusers/pipelines/__init__.py | 40 +++++++++---- src/diffusers/pipelines/auto_pipeline.py | 10 +++- src/diffusers/pipelines/kolors/__init__.py | 11 ++-- src/diffusers/utils/__init__.py | 1 + ..._transformers_and_sentencepiece_objects.py | 32 ++++++++++ .../dummy_torch_and_transformers_objects.py | 60 ------------------- src/diffusers/utils/import_utils.py | 18 ++++++ tests/pipelines/kolors/test_kolors.py | 3 +- 9 files changed, 114 insertions(+), 88 deletions(-) create mode 100644 src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index ce437a5e6b..39da57cec0 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -12,6 +12,7 @@ from .utils import ( is_note_seq_available, is_onnx_available, is_scipy_available, + is_sentencepiece_available, is_torch_available, is_torchsde_available, is_transformers_available, @@ -246,8 +247,6 @@ else: "AuraFlowPipeline", "BlipDiffusionControlNetPipeline", "BlipDiffusionPipeline", - "ChatGLMModel", - "ChatGLMTokenizer", "CLIPImageProjection", "CycleDiffusionPipeline", "FluxPipeline", @@ -386,6 +385,19 @@ except OptionalDependencyNotAvailable: else: _import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline"]) +try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_transformers_and_sentencepiece_objects # noqa F403 + + 
_import_structure["utils.dummy_torch_and_transformers_and_sentencepiece_objects"] = [ + name for name in dir(dummy_torch_and_transformers_and_sentencepiece_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend(["KolorsImg2ImgPipeline", "KolorsPipeline"]) + try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() @@ -670,8 +682,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: AudioLDM2UNet2DConditionModel, AudioLDMPipeline, AuraFlowPipeline, - ChatGLMModel, - ChatGLMTokenizer, CLIPImageProjection, CycleDiffusionPipeline, FluxPipeline, @@ -705,8 +715,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline, KandinskyV22PriorPipeline, - KolorsImg2ImgPipeline, - KolorsPipeline, LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline, LattePipeline, @@ -804,6 +812,13 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: else: from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline + try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_transformers_and_sentencepiece_objects import * # noqa F403 + else: + from .pipelines import KolorsImg2ImgPipeline, KolorsPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 46e93ea776..69003764b3 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -10,6 +10,7 @@ from ..utils import ( is_librosa_available, is_note_seq_available, is_onnx_available, + is_sentencepiece_available, is_torch_available, is_torch_npu_available, is_transformers_available, @@ -205,12 +206,6 @@ 
else: "Kandinsky3Img2ImgPipeline", "Kandinsky3Pipeline", ] - _import_structure["kolors"] = [ - "KolorsPipeline", - "KolorsImg2ImgPipeline", - "ChatGLMModel", - "ChatGLMTokenizer", - ] _import_structure["latent_consistency_models"] = [ "LatentConsistencyModelImg2ImgPipeline", "LatentConsistencyModelPipeline", @@ -350,6 +345,22 @@ else: "StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline", ] + +try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import ( + dummy_torch_and_transformers_and_sentencepiece_objects, + ) + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects)) +else: + _import_structure["kolors"] = [ + "KolorsPipeline", + "KolorsImg2ImgPipeline", + ] + try: if not is_flax_available(): raise OptionalDependencyNotAvailable() @@ -507,12 +518,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline, ) - from .kolors import ( - ChatGLMModel, - ChatGLMTokenizer, - KolorsImg2ImgPipeline, - KolorsPipeline, - ) from .latent_consistency_models import ( LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline, @@ -642,6 +647,17 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: StableDiffusionXLKDiffusionPipeline, ) + try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_and_sentencepiece_objects import * + else: + from .kolors import ( + KolorsImg2ImgPipeline, + KolorsPipeline, + ) + try: if not is_flax_available(): raise OptionalDependencyNotAvailable() diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index d45d616ded..8c74c4797d 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ 
b/src/diffusers/pipelines/auto_pipeline.py @@ -18,6 +18,7 @@ from collections import OrderedDict from huggingface_hub.utils import validate_hf_hub_args from ..configuration_utils import ConfigMixin +from ..utils import is_sentencepiece_available from .aura_flow import AuraFlowPipeline from .controlnet import ( StableDiffusionControlNetImg2ImgPipeline, @@ -47,7 +48,6 @@ from .kandinsky2_2 import ( KandinskyV22Pipeline, ) from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline -from .kolors import KolorsImg2ImgPipeline, KolorsPipeline from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline from .pag import ( HunyuanDiTPAGPipeline, @@ -103,7 +103,6 @@ AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict( ("stable-diffusion-xl-controlnet-pag", StableDiffusionXLControlNetPAGPipeline), ("pixart-sigma-pag", PixArtSigmaPAGPipeline), ("auraflow", AuraFlowPipeline), - ("kolors", KolorsPipeline), ("flux", FluxPipeline), ] ) @@ -121,7 +120,6 @@ AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict( ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetImg2ImgPipeline), ("stable-diffusion-xl-pag", StableDiffusionXLPAGImg2ImgPipeline), ("lcm", LatentConsistencyModelImg2ImgPipeline), - ("kolors", KolorsImg2ImgPipeline), ] ) @@ -160,6 +158,12 @@ _AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict( ] ) +if is_sentencepiece_available(): + from .kolors import KolorsPipeline + + AUTO_TEXT2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsPipeline + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsPipeline + SUPPORTED_TASKS_MAPPINGS = [ AUTO_TEXT2IMAGE_PIPELINES_MAPPING, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, diff --git a/src/diffusers/pipelines/kolors/__init__.py b/src/diffusers/pipelines/kolors/__init__.py index 843ee93c25..671d22e9f4 100644 --- a/src/diffusers/pipelines/kolors/__init__.py +++ b/src/diffusers/pipelines/kolors/__init__.py @@ -5,6 +5,7 @@ from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, 
get_objects_from_module, + is_sentencepiece_available, is_torch_available, is_transformers_available, ) @@ -14,12 +15,12 @@ _dummy_objects = {} _import_structure = {} try: - if not (is_transformers_available() and is_torch_available()): + if not (is_transformers_available() and is_torch_available() and is_sentencepiece_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ...utils import dummy_torch_and_transformers_and_sentencepiece_objects # noqa F403 - _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects)) else: _import_structure["pipeline_kolors"] = ["KolorsPipeline"] _import_structure["pipeline_kolors_img2img"] = ["KolorsImg2ImgPipeline"] @@ -28,10 +29,10 @@ else: if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: - if not (is_transformers_available() and is_torch_available()): + if not (is_transformers_available() and is_torch_available() and is_sentencepiece_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ...utils.dummy_torch_and_transformers_and_sentencepiece_objects import * else: from .pipeline_kolors import KolorsPipeline diff --git a/src/diffusers/utils/__init__.py b/src/diffusers/utils/__init__.py index d2633d2ec9..c7ea2bcc5b 100644 --- a/src/diffusers/utils/__init__.py +++ b/src/diffusers/utils/__init__.py @@ -78,6 +78,7 @@ from .import_utils import ( is_peft_version, is_safetensors_available, is_scipy_available, + is_sentencepiece_available, is_tensorboard_available, is_timm_available, is_torch_available, diff --git a/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py new file mode 100644
index 0000000000..a70d003f7f --- /dev/null +++ b/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py @@ -0,0 +1,32 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class KolorsImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + +class KolorsPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 63151cb867..ad3a1663da 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -242,36 +242,6 @@ class AuraFlowPipeline(metaclass=DummyObject): requires_backends(cls, ["torch", "transformers"]) -class ChatGLMModel(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, 
**kwargs): - requires_backends(cls, ["torch", "transformers"]) - - -class ChatGLMTokenizer(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - class CLIPImageProjection(metaclass=DummyObject): _backends = ["torch", "transformers"] @@ -767,36 +737,6 @@ class KandinskyV22PriorPipeline(metaclass=DummyObject): requires_backends(cls, ["torch", "transformers"]) -class KolorsImg2ImgPipeline(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - -class KolorsPipeline(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - class LatentConsistencyModelImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] diff --git a/src/diffusers/utils/import_utils.py b/src/diffusers/utils/import_utils.py index 44477df2e2..09cb715a60 100644 --- a/src/diffusers/utils/import_utils.py +++ b/src/diffusers/utils/import_utils.py @@ -294,6 +294,13 @@ try: except importlib_metadata.PackageNotFoundError: _torchvision_available = False +_sentencepiece_available = 
importlib.util.find_spec("sentencepiece") is not None +try: + _sentencepiece_version = importlib_metadata.version("sentencepiece") + logger.info(f"Successfully imported sentencepiece version {_sentencepiece_version}") +except importlib_metadata.PackageNotFoundError: + _sentencepiece_available = False + _matplotlib_available = importlib.util.find_spec("matplotlib") is not None try: _matplotlib_version = importlib_metadata.version("matplotlib") @@ -436,6 +443,10 @@ def is_google_colab(): return _is_google_colab +def is_sentencepiece_available(): + return _sentencepiece_available + + # docstyle-ignore FLAX_IMPORT_ERROR = """ {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the @@ -553,6 +564,12 @@ SAFETENSORS_IMPORT_ERROR = """ {0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors` """ +# docstyle-ignore +SENTENCEPIECE_IMPORT_ERROR = """ +{0} requires the sentencepiece library but it was not found in your environment. You can install it with pip: `pip install sentencepiece` +""" + + # docstyle-ignore BITSANDBYTES_IMPORT_ERROR = """ {0} requires the bitsandbytes library but it was not found in your environment. 
You can install it with pip: `pip install bitsandbytes` @@ -581,6 +598,7 @@ BACKENDS_MAPPING = OrderedDict( ("peft", (is_peft_available, PEFT_IMPORT_ERROR)), ("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)), ("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)), + ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), ] ) diff --git a/tests/pipelines/kolors/test_kolors.py b/tests/pipelines/kolors/test_kolors.py index 3f7fcaf595..719a5ef101 100644 --- a/tests/pipelines/kolors/test_kolors.py +++ b/tests/pipelines/kolors/test_kolors.py @@ -20,12 +20,11 @@ import torch from diffusers import ( AutoencoderKL, - ChatGLMModel, - ChatGLMTokenizer, EulerDiscreteScheduler, KolorsPipeline, UNet2DConditionModel, ) +from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import (