mirror of https://github.com/huggingface/diffusers.git
Fix all missing optional import statements from pipeline folders (#4272)
* fix circular import
* fix imports when watermark not specified
* fix all pipelines
commit b37dc3b3cd
parent ff8f58086b
committed by GitHub
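Every pipeline hunk below converges on the same guard: probe the optional backends inside a `try`, raise `OptionalDependencyNotAvailable` if anything is missing, export dummy objects in the `except` branch, and import the real pipelines in the `else` branch. The following is a minimal, standalone sketch of that shape; the exception class and `is_*_available()` helpers here are simplified stand-ins for the ones in `diffusers.utils`, not the library's implementation.

```python
# Minimal sketch of the try/except/else guard this commit standardizes across the
# pipeline __init__.py files. The helpers below are simplified stand-ins for
# diffusers.utils; only the control flow mirrors the real code.
import importlib.util


class OptionalDependencyNotAvailable(Exception):
    """Raised when an optional backend required by a pipeline is missing."""


def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None


def is_transformers_available() -> bool:
    return importlib.util.find_spec("transformers") is not None


try:
    # Raise into the except branch when a backend is missing, instead of simply
    # skipping the imports and leaving the names undefined.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # diffusers imports placeholder "dummy" objects here; they only error out,
    # with an installation hint, when actually used.
    print("optional backends missing: dummy placeholders would be exported")
else:
    # With all backends present, the real pipeline classes are imported.
    print("torch and transformers found: real pipeline classes would be imported")
```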
@@ -25,7 +25,6 @@ import torch.nn.functional as F
 from huggingface_hub import hf_hub_download
 from torch import nn

-from .models.lora import LoRACompatibleConv, LoRACompatibleLinear, LoRAConv2dLayer, LoRALinearLayer
 from .utils import (
     DIFFUSERS_CACHE,
     HF_HUB_OFFLINE,
@@ -69,7 +68,7 @@ CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = "pytorch_custom_diffusion_weights.safetensors"
 class PatchedLoraProjection(nn.Module):
     def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None):
         super().__init__()
-        from .models.attention_processor import LoRALinearLayer
+        from .models.lora import LoRALinearLayer

         self.regular_linear_layer = regular_linear_layer

@@ -244,6 +243,7 @@ class UNet2DConditionLoadersMixin:
             SlicedAttnAddedKVProcessor,
             XFormersAttnProcessor,
         )
+        from .models.lora import LoRACompatibleConv, LoRACompatibleLinear, LoRAConv2dLayer, LoRALinearLayer

         cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
         force_download = kwargs.pop("force_download", False)
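The three hunks above implement the "fix circular import" part of this commit: the module-level `.models.lora` import is dropped and re-imported inside the functions that need it. The self-contained demo below (a hypothetical throwaway package `pkg` with modules `a` and `b`, not diffusers code) shows why deferring an import into a function breaks an import cycle.

```python
import importlib
import sys
import tempfile
import textwrap
from pathlib import Path

# Build a throwaway package with two mutually dependent modules.
pkg = Path(tempfile.mkdtemp()) / "pkg"
pkg.mkdir()
(pkg / "__init__.py").write_text("")
(pkg / "a.py").write_text(textwrap.dedent("""\
    import pkg.b  # a depends on b at import time

    def helper():
        return "helper from a"
"""))
(pkg / "b.py").write_text(textwrap.dedent("""\
    # A module-level `import pkg.a` here would close the cycle and can fail on a
    # partially initialised module; deferring it into the function avoids that,
    # which is the same trick the commit applies to the LoRA imports above.
    def use_a():
        import pkg.a  # deferred import: resolved only when use_a() is called
        return pkg.a.helper()
"""))

sys.path.insert(0, str(pkg.parent))
a = importlib.import_module("pkg.a")  # imports cleanly despite the mutual dependency
b = importlib.import_module("pkg.b")
print(b.use_a())  # -> helper from a
```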
@@ -5,7 +5,7 @@ import numpy as np
 import PIL
 from PIL import Image

-from ...utils import BaseOutput, is_torch_available, is_transformers_available
+from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


 @dataclass
@@ -27,7 +27,12 @@ class AltDiffusionPipelineOutput(BaseOutput):
     nsfw_content_detected: Optional[List[bool]]


-if is_transformers_available() and is_torch_available():
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
+else:
     from .modeling_roberta_series import RobertaSeriesModelWithTransformation
     from .pipeline_alt_diffusion import AltDiffusionPipeline
     from .pipeline_alt_diffusion_img2img import AltDiffusionImg2ImgPipeline
@@ -7,7 +7,12 @@ from ...utils import (
 )


-if is_transformers_available() and is_torch_available() and is_invisible_watermark_available():
+try:
+    if not (is_transformers_available() and is_torch_available() and is_invisible_watermark_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_torch_and_transformers_and_invisible_watermark_objects import *  # noqa F403
+else:
     from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline


@@ -2,7 +2,6 @@ from ...utils import (
     OptionalDependencyNotAvailable,
     is_torch_available,
     is_transformers_available,
-    is_transformers_version,
 )


@@ -10,7 +9,7 @@ try:
     if not (is_transformers_available() and is_torch_available()):
         raise OptionalDependencyNotAvailable()
 except OptionalDependencyNotAvailable:
-    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
+    from ...utils.dummy_torch_and_transformers_objects import *
 else:
     from .pipeline_kandinsky import KandinskyPipeline
     from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
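The except-branches now pull in `dummy_torch_and_transformers_objects` with a wildcard import instead of naming individual classes. Roughly, each dummy is a placeholder that is importable everywhere but only fails, with an install hint, when someone actually tries to use it. The snippet below is a simplified standalone model of that behavior, not the actual generated dummy module.

```python
# Rough standalone model of what the wildcard dummy_torch_and_transformers_objects
# imports provide: importable placeholder classes that raise a helpful error only
# on use. This is illustrative, not diffusers' generated code.
class KandinskyPipeline:  # stand-in exported when torch/transformers are missing
    _required_backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        raise ImportError(
            "KandinskyPipeline requires the following backends, which were not found "
            "in your environment: " + ", ".join(self._required_backends)
            + ". Please install them to use this pipeline."
        )


if __name__ == "__main__":
    try:
        KandinskyPipeline()
    except ImportError as err:
        print(err)
```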
@@ -1,7 +1,20 @@
-from .pipeline_kandinsky2_2 import KandinskyV22Pipeline
-from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline
-from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline
-from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline
-from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline
-from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline
-from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_torch_and_transformers_objects import *
+else:
+    from .pipeline_kandinsky2_2 import KandinskyV22Pipeline
+    from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline
+    from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline
+    from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline
+    from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline
+    from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline
+    from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline
@@ -1,6 +1,11 @@
-from ...utils import is_transformers_available
+from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
 from .pipeline_latent_diffusion_superresolution import LDMSuperResolutionPipeline


-if is_transformers_available():
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
+else:
     from .pipeline_latent_diffusion import LDMBertModel, LDMTextToImagePipeline
@@ -5,9 +5,14 @@ import numpy as np
 import PIL
 from PIL import Image

-from ...utils import is_torch_available, is_transformers_available
+from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


-if is_transformers_available() and is_torch_available():
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
+else:
     from .image_encoder import PaintByExampleImageEncoder
     from .pipeline_paint_by_example import PaintByExamplePipeline
@@ -6,7 +6,7 @@ import numpy as np
 import PIL
 from PIL import Image

-from ...utils import BaseOutput, is_torch_available, is_transformers_available
+from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


 @dataclass
@@ -27,5 +27,10 @@ class SemanticStableDiffusionPipelineOutput(BaseOutput):
     nsfw_content_detected: Optional[List[bool]]


-if is_transformers_available() and is_torch_available():
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
+else:
     from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
@@ -6,7 +6,7 @@ import numpy as np
 import PIL
 from PIL import Image

-from ...utils import BaseOutput, is_torch_available, is_transformers_available
+from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


 @dataclass
@@ -66,6 +66,11 @@ class StableDiffusionSafePipelineOutput(BaseOutput):
     applied_safety_concept: Optional[str]


-if is_transformers_available() and is_torch_available():
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_torch_and_transformers_objects import *
+else:
     from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe
     from .safety_checker import SafeStableDiffusionSafetyChecker
@@ -4,7 +4,13 @@ from typing import List, Optional, Union
 import numpy as np
 import PIL

-from ...utils import BaseOutput, is_invisible_watermark_available, is_torch_available, is_transformers_available
+from ...utils import (
+    BaseOutput,
+    OptionalDependencyNotAvailable,
+    is_invisible_watermark_available,
+    is_torch_available,
+    is_transformers_available,
+)


 @dataclass
@@ -21,7 +27,12 @@ class StableDiffusionXLPipelineOutput(BaseOutput):
     images: Union[List[PIL.Image.Image], np.ndarray]


-if is_transformers_available() and is_torch_available() and is_invisible_watermark_available():
+try:
+    if not (is_transformers_available() and is_torch_available() and is_invisible_watermark_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_torch_and_transformers_and_invisible_watermark_objects import *  # noqa F403
+else:
     from .pipeline_stable_diffusion_xl import StableDiffusionXLPipeline
     from .pipeline_stable_diffusion_xl_img2img import StableDiffusionXLImg2ImgPipeline
     from .pipeline_stable_diffusion_xl_inpaint import StableDiffusionXLInpaintPipeline
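The two SDXL-related hunks cover the "fix imports when watermark not specified" part of the commit: when `invisible-watermark` is missing, the guard now routes through `OptionalDependencyNotAvailable` and the watermark dummy objects instead of leaving the names undefined. Below is a standalone sketch of such a three-way availability check; the module name `imwatermark` (the import name of the `invisible-watermark` pip package) is an assumption for illustration only, and diffusers' own `is_invisible_watermark_available()` remains the authoritative check.

```python
# Hypothetical standalone helper mirroring the three-way availability check used
# for the SDXL pipelines above. The "imwatermark" module name is an assumption;
# diffusers ships its own is_invisible_watermark_available() in diffusers.utils.
import importlib.util


def _has(module_name: str) -> bool:
    """Return True if the module can be found without importing it."""
    return importlib.util.find_spec(module_name) is not None


def sdxl_backends_available() -> bool:
    return _has("transformers") and _has("torch") and _has("imwatermark")


if __name__ == "__main__":
    print("SDXL optional backends present:", sdxl_backends_available())
```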
@@ -1,5 +1,10 @@
-from ...utils import is_torch_available, is_transformers_available
+from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


-if is_transformers_available() and is_torch_available():
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_torch_and_transformers_objects import *
+else:
     from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline