1
0
mirror of https://github.com/huggingface/diffusers.git synced 2026-01-27 17:22:53 +03:00

add sentencepiece as a soft dependency (#9065)

* add sentencepiece as a soft dependency for kolors

* up

---------

Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
This commit is contained in:
YiYi Xu
2024-08-05 08:04:51 -10:00
committed by GitHub
parent 5934873b8f
commit bc3c73ad0b
9 changed files with 114 additions and 88 deletions

View File

@@ -12,6 +12,7 @@ from .utils import (
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_sentencepiece_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
@@ -246,8 +247,6 @@ else:
"AuraFlowPipeline",
"BlipDiffusionControlNetPipeline",
"BlipDiffusionPipeline",
"ChatGLMModel",
"ChatGLMTokenizer",
"CLIPImageProjection",
"CycleDiffusionPipeline",
"FluxPipeline",
@@ -386,6 +385,19 @@ except OptionalDependencyNotAvailable:
else:
_import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline"])
try:
if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_torch_and_transformers_and_sentencepiece_objects # noqa F403
_import_structure["utils.dummy_torch_and_transformers_and_sentencepiece_objects"] = [
name for name in dir(dummy_torch_and_transformers_and_sentencepiece_objects) if not name.startswith("_")
]
else:
_import_structure["pipelines"].extend(["KolorsImg2ImgPipeline", "KolorsPipeline"])
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
@@ -670,8 +682,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
AudioLDM2UNet2DConditionModel,
AudioLDMPipeline,
AuraFlowPipeline,
ChatGLMModel,
ChatGLMTokenizer,
CLIPImageProjection,
CycleDiffusionPipeline,
FluxPipeline,
@@ -705,8 +715,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline,
KolorsImg2ImgPipeline,
KolorsPipeline,
LatentConsistencyModelImg2ImgPipeline,
LatentConsistencyModelPipeline,
LattePipeline,
@@ -804,6 +812,13 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
else:
from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_sentencepiece_objects import * # noqa F403
else:
from .pipelines import KolorsImg2ImgPipeline, KolorsPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()

View File

@@ -10,6 +10,7 @@ from ..utils import (
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_sentencepiece_available,
is_torch_available,
is_torch_npu_available,
is_transformers_available,
@@ -205,12 +206,6 @@ else:
"Kandinsky3Img2ImgPipeline",
"Kandinsky3Pipeline",
]
_import_structure["kolors"] = [
"KolorsPipeline",
"KolorsImg2ImgPipeline",
"ChatGLMModel",
"ChatGLMTokenizer",
]
_import_structure["latent_consistency_models"] = [
"LatentConsistencyModelImg2ImgPipeline",
"LatentConsistencyModelPipeline",
@@ -350,6 +345,22 @@ else:
"StableDiffusionKDiffusionPipeline",
"StableDiffusionXLKDiffusionPipeline",
]
try:
if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils import (
dummy_torch_and_transformers_and_sentencepiece_objects,
)
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects))
else:
_import_structure["kolors"] = [
"KolorsPipeline",
"KolorsImg2ImgPipeline",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
@@ -507,12 +518,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
Kandinsky3Img2ImgPipeline,
Kandinsky3Pipeline,
)
from .kolors import (
ChatGLMModel,
ChatGLMTokenizer,
KolorsImg2ImgPipeline,
KolorsPipeline,
)
from .latent_consistency_models import (
LatentConsistencyModelImg2ImgPipeline,
LatentConsistencyModelPipeline,
@@ -642,6 +647,17 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
StableDiffusionXLKDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_transformers_and_sentencepiece_objects import *
else:
from .kolors import (
KolorsImg2ImgPipeline,
KolorsPipeline,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()

View File

@@ -18,6 +18,7 @@ from collections import OrderedDict
from huggingface_hub.utils import validate_hf_hub_args
from ..configuration_utils import ConfigMixin
from ..utils import is_sentencepiece_available
from .aura_flow import AuraFlowPipeline
from .controlnet import (
StableDiffusionControlNetImg2ImgPipeline,
@@ -47,7 +48,6 @@ from .kandinsky2_2 import (
KandinskyV22Pipeline,
)
from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline
from .kolors import KolorsImg2ImgPipeline, KolorsPipeline
from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline
from .pag import (
HunyuanDiTPAGPipeline,
@@ -103,7 +103,6 @@ AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict(
("stable-diffusion-xl-controlnet-pag", StableDiffusionXLControlNetPAGPipeline),
("pixart-sigma-pag", PixArtSigmaPAGPipeline),
("auraflow", AuraFlowPipeline),
("kolors", KolorsPipeline),
("flux", FluxPipeline),
]
)
@@ -121,7 +120,6 @@ AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict(
("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetImg2ImgPipeline),
("stable-diffusion-xl-pag", StableDiffusionXLPAGImg2ImgPipeline),
("lcm", LatentConsistencyModelImg2ImgPipeline),
("kolors", KolorsImg2ImgPipeline),
]
)
@@ -160,6 +158,12 @@ _AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict(
]
)
if is_sentencepiece_available():
from .kolors import KolorsPipeline
AUTO_TEXT2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsPipeline
AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsPipeline
SUPPORTED_TASKS_MAPPINGS = [
AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
AUTO_IMAGE2IMAGE_PIPELINES_MAPPING,

View File

@@ -5,6 +5,7 @@ from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_sentencepiece_available,
is_torch_available,
is_transformers_available,
)
@@ -14,12 +15,12 @@ _dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
if not (is_transformers_available() and is_torch_available()) and is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
from ...utils import dummy_torch_and_transformers_and_sentencepiece_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects))
else:
_import_structure["pipeline_kolors"] = ["KolorsPipeline"]
_import_structure["pipeline_kolors_img2img"] = ["KolorsImg2ImgPipeline"]
@@ -28,10 +29,10 @@ else:
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
if not (is_transformers_available() and is_torch_available()) and is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
from ...utils.dummy_torch_and_transformers_and_sentencepiece_objects import *
else:
from .pipeline_kolors import KolorsPipeline

View File

@@ -78,6 +78,7 @@ from .import_utils import (
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,

View File

@@ -0,0 +1,32 @@
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class KolorsImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers", "sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers", "sentencepiece"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers", "sentencepiece"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers", "sentencepiece"])
class KolorsPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers", "sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers", "sentencepiece"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers", "sentencepiece"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers", "sentencepiece"])

View File

@@ -242,36 +242,6 @@ class AuraFlowPipeline(metaclass=DummyObject):
requires_backends(cls, ["torch", "transformers"])
class ChatGLMModel(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class ChatGLMTokenizer(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class CLIPImageProjection(metaclass=DummyObject):
_backends = ["torch", "transformers"]
@@ -767,36 +737,6 @@ class KandinskyV22PriorPipeline(metaclass=DummyObject):
requires_backends(cls, ["torch", "transformers"])
class KolorsImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KolorsPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LatentConsistencyModelImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]

View File

@@ -294,6 +294,13 @@ try:
except importlib_metadata.PackageNotFoundError:
_torchvision_available = False
_sentencepiece_available = importlib.util.find_spec("sentencepiece") is not None
try:
_sentencepiece_version = importlib_metadata.version("sentencepiece")
logger.info(f"Successfully imported sentencepiece version {_sentencepiece_version}")
except importlib_metadata.PackageNotFoundError:
_sentencepiece_available = False
_matplotlib_available = importlib.util.find_spec("matplotlib") is not None
try:
_matplotlib_version = importlib_metadata.version("matplotlib")
@@ -436,6 +443,10 @@ def is_google_colab():
return _is_google_colab
def is_sentencepiece_available():
return _sentencepiece_available
# docstyle-ignore
FLAX_IMPORT_ERROR = """
{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the
@@ -553,6 +564,12 @@ SAFETENSORS_IMPORT_ERROR = """
{0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors`
"""
# docstyle-ignore
SENTENCEPIECE_IMPORT_ERROR = """
{0} requires the sentencepiece library but it was not found in your environment. You can install it with pip: `pip install sentencepiece`
"""
# docstyle-ignore
BITSANDBYTES_IMPORT_ERROR = """
{0} requires the bitsandbytes library but it was not found in your environment. You can install it with pip: `pip install bitsandbytes`
@@ -581,6 +598,7 @@ BACKENDS_MAPPING = OrderedDict(
("peft", (is_peft_available, PEFT_IMPORT_ERROR)),
("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)),
("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)),
("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
]
)

View File

@@ -20,12 +20,11 @@ import torch
from diffusers import (
AutoencoderKL,
ChatGLMModel,
ChatGLMTokenizer,
EulerDiscreteScheduler,
KolorsPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import (