From c68db8014ffe401efad676df873f71008b3282f0 Mon Sep 17 00:00:00 2001
From: DN6
Date: Thu, 17 Apr 2025 00:29:31 +0530
Subject: [PATCH] update

---
 src/diffusers/pipelines/__init__.py | 76 ++-
 .../pipelines/deprecated/__init__.py | 120 +++-
 .../alt_diffusion/pipeline_alt_diffusion.py | 4 +-
 .../pipeline_alt_diffusion_img2img.py | 4 +-
 .../{ => deprecated}/amused/__init__.py | 6 +-
 .../amused/pipeline_amused.py | 10 +-
 .../amused/pipeline_amused_img2img.py | 10 +-
 .../amused/pipeline_amused_inpaint.py | 10 +-
 .../pipeline_audio_diffusion.py | 2 +-
 .../pipelines/deprecated/audioldm/__init__.py | 51 ++
 .../deprecated/audioldm/pipeline_audioldm.py | 563 ++++++++++++++++++
 .../controlnet_xs/__init__.py | 10 +-
 .../controlnet_xs/pipeline_controlnet_xs.py | 22 +-
 .../pipeline_controlnet_xs_sd_xl.py | 24 +-
 .../dance_diffusion/__init__.py | 2 +-
 .../pipeline_dance_diffusion.py | 8 +-
 .../{ => deprecated}/i2vgen_xl/__init__.py | 6 +-
 .../i2vgen_xl/pipeline_i2vgen_xl.py | 14 +-
 .../{ => deprecated}/latte/__init__.py | 6 +-
 .../{ => deprecated}/latte/pipeline_latte.py | 14 +-
 .../{ => deprecated}/musicldm/__init__.py | 6 +-
 .../musicldm/pipeline_musicldm.py | 10 +-
 .../paint_by_example/__init__.py | 6 +-
 .../paint_by_example/image_encoder.py | 2 +-
 .../pipeline_paint_by_example.py | 10 +-
 .../{ => deprecated}/pia/__init__.py | 6 +-
 .../{ => deprecated}/pia/pipeline_pia.py | 26 +-
 .../semantic_stable_diffusion/__init__.py | 6 +-
 .../pipeline_output.py | 2 +-
 .../pipeline_semantic_stable_diffusion.py | 10 +-
 .../{ => deprecated}/shap_e/__init__.py | 6 +-
 .../{ => deprecated}/shap_e/camera.py | 0
 .../shap_e/pipeline_shap_e.py | 8 +-
 .../shap_e/pipeline_shap_e_img2img.py | 8 +-
 .../{ => deprecated}/shap_e/renderer.py | 2 +-
 .../pipeline_spectrogram_diffusion.py | 2 +-
 .../__init__.py | 6 +-
 ...line_stable_diffusion_attend_and_excite.py | 16 +-
 .../stable_diffusion_diffedit/__init__.py | 6 +-
 .../pipeline_stable_diffusion_diffedit.py | 14 +-
 .../stable_diffusion_gligen/__init__.py | 6 +-
 .../pipeline_stable_diffusion_gligen.py | 16 +-
 ...line_stable_diffusion_gligen_text_image.py | 16 +-
 .../stable_diffusion_k_diffusion/__init__.py | 6 +-
 .../pipeline_stable_diffusion_k_diffusion.py | 16 +-
 ...ipeline_stable_diffusion_xl_k_diffusion.py | 18 +-
 .../stable_diffusion_ldm3d/__init__.py | 6 +-
 .../pipeline_stable_diffusion_ldm3d.py | 14 +-
 .../stable_diffusion_panorama/__init__.py | 6 +-
 .../pipeline_stable_diffusion_panorama.py | 14 +-
 .../stable_diffusion_safe/__init__.py | 6 +-
 .../stable_diffusion_safe/pipeline_output.py | 2 +-
 .../pipeline_stable_diffusion_safe.py | 12 +-
 .../stable_diffusion_safe/safety_checker.py | 2 +-
 .../stable_diffusion_sag/__init__.py | 6 +-
 .../pipeline_stable_diffusion_sag.py | 14 +-
 .../pipeline_cycle_diffusion.py | 6 +-
 ...ne_onnx_stable_diffusion_inpaint_legacy.py | 4 +-
 ...ipeline_stable_diffusion_inpaint_legacy.py | 6 +-
 ...pipeline_stable_diffusion_model_editing.py | 6 +-
 .../pipeline_stable_diffusion_paradigms.py | 6 +-
 .../pipeline_stable_diffusion_pix2pix_zero.py | 6 +-
 .../text_to_video_synthesis/__init__.py | 6 +-
 .../pipeline_output.py | 2 +-
 .../pipeline_text_to_video_synth.py | 14 +-
 .../pipeline_text_to_video_synth_img2img.py | 14 +-
 .../pipeline_text_to_video_zero.py | 14 +-
 .../pipeline_text_to_video_zero_sdxl.py | 18 +-
 .../{ => deprecated}/unclip/__init__.py | 6 +-
 .../unclip/pipeline_unclip.py | 10 +-
 .../unclip/pipeline_unclip_image_variation.py | 10 +-
 .../{ => deprecated}/unclip/text_proj.py | 0
 .../{ => deprecated}/unidiffuser/__init__.py | 6 +-
 .../unidiffuser/modeling_text_decoder.py | 0
 .../unidiffuser/modeling_uvit.py | 2 +-
 .../unidiffuser/pipeline_unidiffuser.py | 24 +-
 .../pipeline_versatile_diffusion.py | 2 +-
 .../{ => deprecated}/wuerstchen/__init__.py | 6 +-
 .../wuerstchen/modeling_paella_vq_model.py | 2 +-
 .../wuerstchen/modeling_wuerstchen_common.py | 0
 .../modeling_wuerstchen_diffnext.py | 0
 .../wuerstchen/modeling_wuerstchen_prior.py | 0
 .../wuerstchen/pipeline_wuerstchen.py | 8 +-
 .../pipeline_wuerstchen_combined.py | 4 +-
 .../wuerstchen/pipeline_wuerstchen_prior.py | 8 +-
 85 files changed, 1090 insertions(+), 374 deletions(-)
 rename src/diffusers/pipelines/{ => deprecated}/amused/__init__.py (91%)
 rename src/diffusers/pipelines/{ => deprecated}/amused/pipeline_amused.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/amused/pipeline_amused_img2img.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/amused/pipeline_amused_inpaint.py (98%)
 create mode 100644 src/diffusers/pipelines/deprecated/audioldm/__init__.py
 create mode 100644 src/diffusers/pipelines/deprecated/audioldm/pipeline_audioldm.py
 rename src/diffusers/pipelines/{ => deprecated}/controlnet_xs/__init__.py (84%)
 rename src/diffusers/pipelines/{ => deprecated}/controlnet_xs/pipeline_controlnet_xs.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/controlnet_xs/pipeline_controlnet_xs_sd_xl.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/dance_diffusion/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/dance_diffusion/pipeline_dance_diffusion.py (97%)
 rename src/diffusers/pipelines/{ => deprecated}/i2vgen_xl/__init__.py (86%)
 rename src/diffusers/pipelines/{ => deprecated}/i2vgen_xl/pipeline_i2vgen_xl.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/latte/__init__.py (86%)
 rename src/diffusers/pipelines/{ => deprecated}/latte/pipeline_latte.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/musicldm/__init__.py (88%)
 rename src/diffusers/pipelines/{ => deprecated}/musicldm/pipeline_musicldm.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/paint_by_example/__init__.py (89%)
 rename src/diffusers/pipelines/{ => deprecated}/paint_by_example/image_encoder.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/paint_by_example/pipeline_paint_by_example.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/pia/__init__.py (88%)
 rename src/diffusers/pipelines/{ => deprecated}/pia/pipeline_pia.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/semantic_stable_diffusion/__init__.py (88%)
 rename src/diffusers/pipelines/{ => deprecated}/semantic_stable_diffusion/pipeline_output.py (95%)
 rename src/diffusers/pipelines/{ => deprecated}/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/shap_e/__init__.py (91%)
 rename src/diffusers/pipelines/{ => deprecated}/shap_e/camera.py (100%)
 rename src/diffusers/pipelines/{ => deprecated}/shap_e/pipeline_shap_e.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/shap_e/pipeline_shap_e_img2img.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/shap_e/renderer.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_attend_and_excite/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_diffedit/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_gligen/__init__.py (89%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_k_diffusion/__init__.py (89%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_ldm3d/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_panorama/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_safe/__init__.py (94%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_safe/pipeline_output.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_safe/pipeline_stable_diffusion_safe.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_safe/safety_checker.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_sag/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_sag/pipeline_stable_diffusion_sag.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/__init__.py (90%)
 rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/pipeline_output.py (96%)
 rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/pipeline_text_to_video_synth.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/pipeline_text_to_video_zero.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/unclip/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/unclip/pipeline_unclip.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/unclip/pipeline_unclip_image_variation.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/unclip/text_proj.py (100%)
 rename src/diffusers/pipelines/{ => deprecated}/unidiffuser/__init__.py (91%)
 rename src/diffusers/pipelines/{ => deprecated}/unidiffuser/modeling_text_decoder.py (100%)
 rename src/diffusers/pipelines/{ => deprecated}/unidiffuser/modeling_uvit.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/unidiffuser/pipeline_unidiffuser.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/__init__.py (91%)
 rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/modeling_paella_vq_model.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/modeling_wuerstchen_common.py (100%)
 rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/modeling_wuerstchen_diffnext.py (100%)
 rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/modeling_wuerstchen_prior.py (100%)
 rename src/diffusers/pipelines/{ =>
deprecated}/wuerstchen/pipeline_wuerstchen.py (98%) rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/pipeline_wuerstchen_combined.py (99%) rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/pipeline_wuerstchen_prior.py (98%) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 011f23ed37..f58fff3377 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -115,10 +115,43 @@ else: "VersatileDiffusionImageVariationPipeline", "VersatileDiffusionPipeline", "VersatileDiffusionTextToImagePipeline", + "AmusedImg2ImgPipeline", + "AmusedInpaintPipeline", + "AmusedPipeline", + "StableDiffusionControlNetXSPipeline", + "StableDiffusionXLControlNetXSPipeline", + "I2VGenXLPipeline", + "LattePipeline", + "MusicLDMPipeline", + "PaintByExamplePipeline", + "PIAPipeline", + "SemanticStableDiffusionPipeline", + "ShapEImg2ImgPipeline", + "ShapEPipeline", + "StableDiffusionAttendAndExcitePipeline", + "StableDiffusionPipelineSafe", + "StableDiffusionSAGPipeline", + "StableDiffusionGLIGENPipeline", + "StableDiffusionGLIGENTextImagePipeline", + "StableDiffusionDiffEditPipeline", + "StableDiffusionLDM3DPipeline", + "StableDiffusionPanoramaPipeline", + "TextToVideoSDPipeline", + "TextToVideoZeroPipeline", + "TextToVideoZeroSDXLPipeline", + "VideoToVideoSDPipeline", + "UnCLIPImageVariationPipeline", + "UnCLIPPipeline", + "ImageTextPipelineOutput", + "UniDiffuserModel", + "UniDiffuserPipeline", + "UniDiffuserTextDecoder", + "WuerstchenCombinedPipeline", + "WuerstchenDecoderPipeline", + "WuerstchenPriorPipeline", ] ) _import_structure["allegro"] = ["AllegroPipeline"] - _import_structure["amused"] = ["AmusedImg2ImgPipeline", "AmusedInpaintPipeline", "AmusedPipeline"] _import_structure["animatediff"] = [ "AnimateDiffPipeline", "AnimateDiffControlNetPipeline", @@ -191,12 +224,6 @@ else: "SanaPAGPipeline", ] ) - _import_structure["controlnet_xs"].extend( - [ - "StableDiffusionControlNetXSPipeline", - "StableDiffusionXLControlNetXSPipeline", - ] - ) _import_structure["controlnet_hunyuandit"].extend( [ "HunyuanDiTControlNetPipeline", @@ -264,7 +291,6 @@ else: "LEditsPPPipelineStableDiffusionXL", ] ) - _import_structure["latte"] = ["LattePipeline"] _import_structure["ltx"] = ["LTXPipeline", "LTXImageToVideoPipeline", "LTXConditionPipeline"] _import_structure["lumina"] = ["LuminaPipeline", "LuminaText2ImgPipeline"] _import_structure["lumina2"] = ["Lumina2Pipeline", "Lumina2Text2ImgPipeline"] @@ -276,14 +302,9 @@ else: ] ) _import_structure["mochi"] = ["MochiPipeline"] - _import_structure["musicldm"] = ["MusicLDMPipeline"] _import_structure["omnigen"] = ["OmniGenPipeline"] - _import_structure["paint_by_example"] = ["PaintByExamplePipeline"] - _import_structure["pia"] = ["PIAPipeline"] _import_structure["pixart_alpha"] = ["PixArtAlphaPipeline", "PixArtSigmaPipeline"] _import_structure["sana"] = ["SanaPipeline", "SanaSprintPipeline", "SanaControlNetPipeline"] - _import_structure["semantic_stable_diffusion"] = ["SemanticStableDiffusionPipeline"] - _import_structure["shap_e"] = ["ShapEImg2ImgPipeline", "ShapEPipeline"] _import_structure["stable_audio"] = [ "StableAudioProjectionModel", "StableAudioPipeline", @@ -315,13 +336,6 @@ else: "StableDiffusion3Img2ImgPipeline", "StableDiffusion3InpaintPipeline", ] - _import_structure["stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] - _import_structure["stable_diffusion_safe"] = ["StableDiffusionPipelineSafe"] - _import_structure["stable_diffusion_sag"] = 
["StableDiffusionSAGPipeline"] - _import_structure["stable_diffusion_gligen"] = [ - "StableDiffusionGLIGENPipeline", - "StableDiffusionGLIGENTextImagePipeline", - ] _import_structure["stable_video_diffusion"] = ["StableVideoDiffusionPipeline"] _import_structure["stable_diffusion_xl"].extend( [ @@ -331,32 +345,10 @@ else: "StableDiffusionXLPipeline", ] ) - _import_structure["stable_diffusion_diffedit"] = ["StableDiffusionDiffEditPipeline"] - _import_structure["stable_diffusion_ldm3d"] = ["StableDiffusionLDM3DPipeline"] - _import_structure["stable_diffusion_panorama"] = ["StableDiffusionPanoramaPipeline"] _import_structure["t2i_adapter"] = [ "StableDiffusionAdapterPipeline", "StableDiffusionXLAdapterPipeline", ] - _import_structure["text_to_video_synthesis"] = [ - "TextToVideoSDPipeline", - "TextToVideoZeroPipeline", - "TextToVideoZeroSDXLPipeline", - "VideoToVideoSDPipeline", - ] - _import_structure["i2vgen_xl"] = ["I2VGenXLPipeline"] - _import_structure["unclip"] = ["UnCLIPImageVariationPipeline", "UnCLIPPipeline"] - _import_structure["unidiffuser"] = [ - "ImageTextPipelineOutput", - "UniDiffuserModel", - "UniDiffuserPipeline", - "UniDiffuserTextDecoder", - ] - _import_structure["wuerstchen"] = [ - "WuerstchenCombinedPipeline", - "WuerstchenDecoderPipeline", - "WuerstchenPriorPipeline", - ] _import_structure["wan"] = ["WanPipeline", "WanImageToVideoPipeline", "WanVideoToVideoPipeline"] try: if not is_onnx_available(): diff --git a/src/diffusers/pipelines/deprecated/__init__.py b/src/diffusers/pipelines/deprecated/__init__.py index 9936323170..6464752f5b 100644 --- a/src/diffusers/pipelines/deprecated/__init__.py +++ b/src/diffusers/pipelines/deprecated/__init__.py @@ -1,4 +1,8 @@ from typing import TYPE_CHECKING +import functools +import inspect +import warnings +import sys from ...utils import ( DIFFUSERS_SLOW_IMPORT, @@ -12,6 +16,27 @@ from ...utils import ( ) +# Custom Lazy Module for deprecated pipelines that shows a warning +class _DeprecatedLazyModule(_LazyModule): + """ + Module class that surfaces all objects but only performs associated imports when the objects are requested, + and shows deprecation warnings when any of its attributes are accessed. 
+ """ + + def __getattr__(self, name): + # Regular attribute access - first check if it's supposed to be loaded + if name in self._modules or name in self._class_to_module: + # Only warn for actual pipeline components, not utility functions + warnings.warn( + f"{name} is deprecated and will no longer be maintained.", + FutureWarning, + stacklevel=2, + ) + + # Use the standard lazy module behavior to load the attribute + return super().__getattr__(name) + + _dummy_objects = {} _import_structure = {} @@ -23,6 +48,7 @@ except OptionalDependencyNotAvailable: _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) else: + _import_structure["dance_diffusion"] = ["DanceDiffusionPipeline"] _import_structure["latent_diffusion_uncond"] = ["LDMPipeline"] _import_structure["pndm"] = ["PNDMPipeline"] _import_structure["repaint"] = ["RePaintPipeline"] @@ -42,6 +68,55 @@ else: "AltDiffusionPipeline", "AltDiffusionPipelineOutput", ] + _import_structure["amused"] = ["AmusedPipeline", "AmusedImg2ImgPipeline", "AmusedInpaintPipeline"] + _import_structure["audioldm"] = ["AudioLDMPipeline"] + _import_structure["controlnet_xs"] = [ + "StableDiffusionControlNetXSPipeline", + "StableDiffusionXLControlNetXSPipeline", + ] + _import_structure["i2vgen_xl"] = ["I2VGenXLPipeline"] + _import_structure["latte"] = ["LattePipeline"] + _import_structure["musicldm"] = ["MusicLDMPipeline"] + _import_structure["paint_by_example"] = ["PaintByExamplePipeline"] + _import_structure["pia"] = ["PIAPipeline"] + _import_structure["semantic_stable_diffusion"] = ["SemanticStableDiffusionPipeline"] + _import_structure["shap_e"] = [ + "ShapEPipeline", + "ShapEImg2ImgPipeline", + ] + _import_structure["stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] + _import_structure["stable_diffusion_diffedit"] = ["StableDiffusionDiffEditPipeline"] + _import_structure["stable_diffusion_gligen"] = [ + "StableDiffusionGLIGENPipeline", + "StableDiffusionGLIGENTextImagePipeline", + ] + _import_structure["stable_diffusion_k_diffusion"] = [ + "StableDiffusionKDiffusionPipeline", + "StableDiffusionXLKDiffusionPipeline", + ] + _import_structure["stable_diffusion_ldm3d"] = ["StableDiffusionLDM3DPipeline"] + _import_structure["stable_diffusion_panorama"] = ["StableDiffusionPanoramaPipeline"] + _import_structure["stable_diffusion_safe"] = ["StableDiffusionPipelineSafe"] + _import_structure["stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"] + _import_structure["stable_diffusion_variants"] = [ + "CycleDiffusionPipeline", + "StableDiffusionInpaintPipelineLegacy", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionModelEditingPipeline", + ] + _import_structure["text_to_video_synthesis"] = [ + "TextToVideoSDPipeline", + "TextToVideoZeroPipeline", + "TextToVideoZeroSDXLPipeline", + ] + _import_structure["unclip"] = [ + "UnCLIPPipeline", + "UnCLIPImageVariationPipeline", + ] + _import_structure["unidiffuser"] = [ + "UniDiffuserPipeline", + ] _import_structure["versatile_diffusion"] = [ "VersatileDiffusionDualGuidedPipeline", "VersatileDiffusionImageVariationPipeline", @@ -49,12 +124,10 @@ else: "VersatileDiffusionTextToImagePipeline", ] _import_structure["vq_diffusion"] = ["VQDiffusionPipeline"] - _import_structure["stable_diffusion_variants"] = [ - "CycleDiffusionPipeline", - "StableDiffusionInpaintPipelineLegacy", - "StableDiffusionPix2PixZeroPipeline", - "StableDiffusionParadigmsPipeline", - "StableDiffusionModelEditingPipeline", + _import_structure["wuerstchen"] = [ + 
"WuerstchenCombinedPipeline", + "WuerstchenDecoderPipeline", + "WuerstchenPriorPipeline", ] try: @@ -88,6 +161,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from ...utils.dummy_pt_objects import * else: + from .dance_diffusion import DanceDiffusionPipeline from .latent_diffusion_uncond import LDMPipeline from .pndm import PNDMPipeline from .repaint import RePaintPipeline @@ -102,8 +176,26 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: else: from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, AltDiffusionPipelineOutput - from .audio_diffusion import AudioDiffusionPipeline, Mel - from .spectrogram_diffusion import SpectrogramDiffusionPipeline + from .audioldm import AudioLDMPipeline + from .controlnet_xs import StableDiffusionControlNetXSPipeline, StableDiffusionXLControlNetXSPipeline + from .i2vgen_xl import I2VGenXLPipeline + from .latte import LattePipeline + from .musicldm import MusicLDMPipeline + from .paint_by_example import PaintByExamplePipeline + from .pia import PIAPipeline + from .semantic_stable_diffusion import SemanticStableDiffusionPipeline + from .shap_e import ShapEPipeline, ShapEImg2ImgPipeline + from .stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline + from .stable_diffusion_diffedit import StableDiffusionDiffEditPipeline + from .stable_diffusion_gligen import StableDiffusionGLIGENPipeline, StableDiffusionGLIGENTextImagePipeline + from .stable_diffusion_k_diffusion import ( + StableDiffusionKDiffusionPipeline, + StableDiffusionXLKDiffusionPipeline, + ) + from .stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline + from .stable_diffusion_panorama import StableDiffusionPanoramaPipeline + from .stable_diffusion_safe import StableDiffusionPipelineSafe + from .stable_diffusion_sag import StableDiffusionSAGPipeline from .stable_diffusion_variants import ( CycleDiffusionPipeline, StableDiffusionInpaintPipelineLegacy, @@ -111,7 +203,13 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: StableDiffusionParadigmsPipeline, StableDiffusionPix2PixZeroPipeline, ) - from .stochastic_karras_ve import KarrasVePipeline + from .text_to_video_synthesis import ( + TextToVideoSDPipeline, + TextToVideoZeroPipeline, + TextToVideoZeroSDXLPipeline, + ) + from .unclip import UnCLIPPipeline, UnCLIPImageVariationPipeline + from .unidiffuser import UniDiffuserPipeline from .versatile_diffusion import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, @@ -119,6 +217,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: VersatileDiffusionTextToImagePipeline, ) from .vq_diffusion import VQDiffusionPipeline + from .wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline, WuerstchenPriorPipeline try: if not (is_torch_available() and is_librosa_available()): @@ -143,7 +242,8 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: else: import sys - sys.modules[__name__] = _LazyModule( + # Use the custom deprecated lazy module instead of the standard one + sys.modules[__name__] = _DeprecatedLazyModule( __name__, globals()["__file__"], _import_structure, diff --git a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py index 48c0aa4f6d..6fc9e1ec39 100644 --- a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py @@ -39,8 +39,8 @@ from ....utils import ( unscale_lora_layers, ) from ....utils.torch_utils import randn_tensor 
-from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin -from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .modeling_roberta_series import RobertaSeriesModelWithTransformation from .pipeline_output import AltDiffusionPipelineOutput diff --git a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py index fa70689d79..563c81d1ce 100644 --- a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py @@ -42,8 +42,8 @@ from ....utils import ( unscale_lora_layers, ) from ....utils.torch_utils import randn_tensor -from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin -from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .modeling_roberta_series import RobertaSeriesModelWithTransformation from .pipeline_output import AltDiffusionPipelineOutput diff --git a/src/diffusers/pipelines/amused/__init__.py b/src/diffusers/pipelines/deprecated/amused/__init__.py similarity index 91% rename from src/diffusers/pipelines/amused/__init__.py rename to src/diffusers/pipelines/deprecated/amused/__init__.py index 3c4d07a426..2812eadf6f 100644 --- a/src/diffusers/pipelines/amused/__init__.py +++ b/src/diffusers/pipelines/deprecated/amused/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -16,7 +16,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline, @@ -40,7 +40,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( AmusedPipeline, ) else: diff --git a/src/diffusers/pipelines/amused/pipeline_amused.py b/src/diffusers/pipelines/deprecated/amused/pipeline_amused.py similarity index 98% rename from src/diffusers/pipelines/amused/pipeline_amused.py rename to src/diffusers/pipelines/deprecated/amused/pipeline_amused.py index 12f7dc7c59..eeedc37b13 100644 --- a/src/diffusers/pipelines/amused/pipeline_amused.py +++ b/src/diffusers/pipelines/deprecated/amused/pipeline_amused.py @@ -17,11 +17,11 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...models import UVit2DModel, VQModel -from ...schedulers import AmusedScheduler -from ...utils import is_torch_xla_available, replace_example_docstring -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....image_processor import 
VaeImageProcessor +from ....models import UVit2DModel, VQModel +from ....schedulers import AmusedScheduler +from ....utils import is_torch_xla_available, replace_example_docstring +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/amused/pipeline_amused_img2img.py b/src/diffusers/pipelines/deprecated/amused/pipeline_amused_img2img.py similarity index 98% rename from src/diffusers/pipelines/amused/pipeline_amused_img2img.py rename to src/diffusers/pipelines/deprecated/amused/pipeline_amused_img2img.py index 7ac05b39c3..2ff1e503fc 100644 --- a/src/diffusers/pipelines/amused/pipeline_amused_img2img.py +++ b/src/diffusers/pipelines/deprecated/amused/pipeline_amused_img2img.py @@ -17,11 +17,11 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...models import UVit2DModel, VQModel -from ...schedulers import AmusedScheduler -from ...utils import is_torch_xla_available, replace_example_docstring -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....models import UVit2DModel, VQModel +from ....schedulers import AmusedScheduler +from ....utils import is_torch_xla_available, replace_example_docstring +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/amused/pipeline_amused_inpaint.py b/src/diffusers/pipelines/deprecated/amused/pipeline_amused_inpaint.py similarity index 98% rename from src/diffusers/pipelines/amused/pipeline_amused_inpaint.py rename to src/diffusers/pipelines/deprecated/amused/pipeline_amused_inpaint.py index d908c32745..534e2b47d7 100644 --- a/src/diffusers/pipelines/amused/pipeline_amused_inpaint.py +++ b/src/diffusers/pipelines/deprecated/amused/pipeline_amused_inpaint.py @@ -18,11 +18,11 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...models import UVit2DModel, VQModel -from ...schedulers import AmusedScheduler -from ...utils import is_torch_xla_available, replace_example_docstring -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....models import UVit2DModel, VQModel +from ....schedulers import AmusedScheduler +from ....utils import is_torch_xla_available, replace_example_docstring +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py index 47044e050a..663e474344 100644 --- a/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py +++ b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py @@ -23,7 +23,7 @@ from PIL import Image from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import DDIMScheduler, DDPMScheduler from ....utils.torch_utils import randn_tensor -from ...pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput +from 
..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel diff --git a/src/diffusers/pipelines/deprecated/audioldm/__init__.py b/src/diffusers/pipelines/deprecated/audioldm/__init__.py new file mode 100644 index 0000000000..75b11bf278 --- /dev/null +++ b/src/diffusers/pipelines/deprecated/audioldm/__init__.py @@ -0,0 +1,51 @@ +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import ( + AudioLDMPipeline, + ) + + _dummy_objects.update({"AudioLDMPipeline": AudioLDMPipeline}) +else: + _import_structure["pipeline_audioldm"] = ["AudioLDMPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import ( + AudioLDMPipeline, + ) + + else: + from .pipeline_audioldm import AudioLDMPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/src/diffusers/pipelines/deprecated/audioldm/pipeline_audioldm.py b/src/diffusers/pipelines/deprecated/audioldm/pipeline_audioldm.py new file mode 100644 index 0000000000..307325e7e7 --- /dev/null +++ b/src/diffusers/pipelines/deprecated/audioldm/pipeline_audioldm.py @@ -0,0 +1,563 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +import torch.nn.functional as F +from transformers import ClapTextModelWithProjection, RobertaTokenizer, RobertaTokenizerFast, SpeechT5HifiGan + +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import is_torch_xla_available, logging, replace_example_docstring +from ....utils.torch_utils import randn_tensor +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline, StableDiffusionMixin + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import AudioLDMPipeline + >>> import torch + >>> import scipy + + >>> repo_id = "cvssp/audioldm-s-full-v2" + >>> pipe = AudioLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs" + >>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0] + + >>> # save the audio sample as a .wav file + >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) + ``` +""" + + +class AudioLDMPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-audio generation using AudioLDM. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.ClapTextModelWithProjection`]): + Frozen text-encoder (`ClapTextModelWithProjection`, specifically the + [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. + tokenizer ([`PreTrainedTokenizer`]): + A [`~transformers.RobertaTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded audio latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + vocoder ([`~transformers.SpeechT5HifiGan`]): + Vocoder of class `SpeechT5HifiGan`. 
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: ClapTextModelWithProjection, + tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + vocoder: SpeechT5HifiGan, + ): + super().__init__() + + logger.warning(f"{self.__class__.__name__} is deprecated and will no longer be actively maintained") + + logger.warning( + f"{self.__class__.__name__} is deprecated and will no longer be actively maintained" + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + vocoder=vocoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 + + def _encode_prompt( + self, + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device (`torch.device`): + torch device + num_waveforms_per_prompt (`int`): + number of waveforms that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the audio generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLAP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask.to(device), + ) + prompt_embeds = prompt_embeds.text_embeds + # additional L_2 normalization over each hidden-state + prompt_embeds = F.normalize(prompt_embeds, dim=-1) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + ( + bs_embed, + seq_len, + ) = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt) + prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + uncond_input_ids = uncond_input.input_ids.to(device) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input_ids, + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds.text_embeds + # additional L_2 normalization over each hidden-state + negative_prompt_embeds = F.normalize(negative_prompt_embeds, dim=-1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + mel_spectrogram = self.vae.decode(latents).sample + return mel_spectrogram + + def mel_spectrogram_to_waveform(self, mel_spectrogram): + if mel_spectrogram.dim() == 4: + mel_spectrogram = mel_spectrogram.squeeze(1) + + waveform = self.vocoder(mel_spectrogram) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + waveform = waveform.cpu().float() + return waveform + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor + if audio_length_in_s < min_audio_length_in_s: + raise ValueError( + f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but " + f"is {audio_length_in_s}." 
+ ) + + if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: + raise ValueError( + f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the " + f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of " + f"{self.vae_scale_factor}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with width->self.vocoder.config.model_in_dim + def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(self.vocoder.config.model_in_dim) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + audio_length_in_s: Optional[float] = None, + num_inference_steps: int = 10, + guidance_scale: float = 2.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_waveforms_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + output_type: Optional[str] = "np", + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`. + audio_length_in_s (`int`, *optional*, defaults to 5.12): + The length of the generated audio sample in seconds. + num_inference_steps (`int`, *optional*, defaults to 10): + The number of denoising steps. More denoising steps usually lead to a higher quality audio at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 2.5): + A higher guidance scale value encourages the model to generate audio that is closely linked to the text + `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in audio generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_waveforms_per_prompt (`int`, *optional*, defaults to 1): + The number of waveforms to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated image. Choose between `"np"` to return a NumPy `np.ndarray` or + `"pt"` to return a PyTorch `torch.Tensor` object. + + Examples: + + Returns: + [`~pipelines.AudioPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated audio. + """ + # 0. Convert audio input length from seconds to spectrogram height + vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate + + if audio_length_in_s is None: + audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor + + height = int(audio_length_in_s / vocoder_upsample_factor) + + original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) + if height % self.vae_scale_factor != 0: + height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor + logger.info( + f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} " + f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the " + f"denoising process." + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_waveforms_per_prompt, + num_channels_latents, + height, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. 
Prepare extra step kwargs + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=None, + class_labels=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if XLA_AVAILABLE: + xm.mark_step() + + # 8. Post-processing + mel_spectrogram = self.decode_latents(latents) + + audio = self.mel_spectrogram_to_waveform(mel_spectrogram) + + audio = audio[:, :original_waveform_length] + + if output_type == "np": + audio = audio.numpy() + + if not return_dict: + return (audio,) + + return AudioPipelineOutput(audios=audio) diff --git a/src/diffusers/pipelines/controlnet_xs/__init__.py b/src/diffusers/pipelines/deprecated/controlnet_xs/__init__.py similarity index 84% rename from src/diffusers/pipelines/controlnet_xs/__init__.py rename to src/diffusers/pipelines/deprecated/controlnet_xs/__init__.py index 978278b184..422af210d4 100644 --- a/src/diffusers/pipelines/controlnet_xs/__init__.py +++ b/src/diffusers/pipelines/deprecated/controlnet_xs/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -28,7 +28,7 @@ try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_flax_and_transformers_objects # noqa F403 + from ....utils import dummy_flax_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) else: @@ -41,7 +41,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_controlnet_xs import StableDiffusionControlNetXSPipeline from .pipeline_controlnet_xs_sd_xl import 
StableDiffusionXLControlNetXSPipeline @@ -50,7 +50,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 + from ....utils.dummy_flax_and_transformers_objects import * # noqa F403 else: pass # from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline diff --git a/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py b/src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs.py similarity index 98% rename from src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py rename to src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs.py index 901ca25c57..3dcb9a5a0d 100644 --- a/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +++ b/src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs.py @@ -21,13 +21,13 @@ import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...callbacks import MultiPipelineCallbacks, PipelineCallback -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....callbacks import MultiPipelineCallbacks, PipelineCallback +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -36,10 +36,10 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor -from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py b/src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs_sd_xl.py similarity index 98% rename from src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py rename to src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs_sd_xl.py index acf1f5489e..df1bac8f5f 100644 --- a/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +++ b/src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs_sd_xl.py @@ -28,33 +28,33 @@ from transformers import ( from diffusers.utils.import_utils import 
is_invisible_watermark_available -from ...callbacks import MultiPipelineCallbacks, PipelineCallback -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel -from ...models.attention_processor import ( +from ....callbacks import MultiPipelineCallbacks, PipelineCallback +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel +from ....models.attention_processor import ( AttnProcessor2_0, XFormersAttnProcessor, ) -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor -from ..pipeline_utils import DiffusionPipeline -from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput +from ....utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker -from ...utils import is_torch_xla_available +from ....utils import is_torch_xla_available if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/dance_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/dance_diffusion/__init__.py similarity index 87% rename from src/diffusers/pipelines/dance_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/dance_diffusion/__init__.py index 0d3e466dfa..8dcd746787 100644 --- a/src/diffusers/pipelines/dance_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/dance_diffusion/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_dance_diffusion": ["DanceDiffusionPipeline"]} diff --git a/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py b/src/diffusers/pipelines/deprecated/dance_diffusion/pipeline_dance_diffusion.py similarity index 97% rename from src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py rename to src/diffusers/pipelines/deprecated/dance_diffusion/pipeline_dance_diffusion.py index 34b2a39455..0075eb8b31 100644 --- a/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +++ b/src/diffusers/pipelines/deprecated/dance_diffusion/pipeline_dance_diffusion.py @@ -17,10 +17,10 @@ from typing import List, Optional, Tuple, Union import torch -from ...models import UNet1DModel -from ...schedulers import SchedulerMixin -from ...utils import is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor +from ....models import UNet1DModel +from ....schedulers import SchedulerMixin +from ....utils import is_torch_xla_available, logging +from 
....utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline diff --git a/src/diffusers/pipelines/i2vgen_xl/__init__.py b/src/diffusers/pipelines/deprecated/i2vgen_xl/__init__.py similarity index 86% rename from src/diffusers/pipelines/i2vgen_xl/__init__.py rename to src/diffusers/pipelines/deprecated/i2vgen_xl/__init__.py index b24a7e4cee..43646542d9 100644 --- a/src/diffusers/pipelines/i2vgen_xl/__init__.py +++ b/src/diffusers/pipelines/deprecated/i2vgen_xl/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -29,7 +29,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + from ....utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_i2vgen_xl import I2VGenXLPipeline diff --git a/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py b/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py similarity index 99% rename from src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py rename to src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py index 58d65a190d..e34a4ea26f 100644 --- a/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +++ b/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py @@ -21,18 +21,18 @@ import PIL import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...models import AutoencoderKL -from ...models.unets.unet_i2vgen_xl import I2VGenXLUNet -from ...schedulers import DDIMScheduler -from ...utils import ( +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....models import AutoencoderKL +from ....models.unets.unet_i2vgen_xl import I2VGenXLUNet +from ....schedulers import DDIMScheduler +from ....utils import ( BaseOutput, is_torch_xla_available, logging, replace_example_docstring, ) -from ...utils.torch_utils import randn_tensor -from ...video_processor import VideoProcessor +from ....utils.torch_utils import randn_tensor +from ....video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin diff --git a/src/diffusers/pipelines/latte/__init__.py b/src/diffusers/pipelines/deprecated/latte/__init__.py similarity index 86% rename from src/diffusers/pipelines/latte/__init__.py rename to src/diffusers/pipelines/deprecated/latte/__init__.py index 4296b42e12..dd3a04cf63 100644 --- a/src/diffusers/pipelines/latte/__init__.py +++ b/src/diffusers/pipelines/deprecated/latte/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ try: if not (is_transformers_available() and 
is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_latte import LattePipeline diff --git a/src/diffusers/pipelines/latte/pipeline_latte.py b/src/diffusers/pipelines/deprecated/latte/pipeline_latte.py similarity index 99% rename from src/diffusers/pipelines/latte/pipeline_latte.py rename to src/diffusers/pipelines/deprecated/latte/pipeline_latte.py index e9a95e8be4..dfc1022a0b 100644 --- a/src/diffusers/pipelines/latte/pipeline_latte.py +++ b/src/diffusers/pipelines/deprecated/latte/pipeline_latte.py @@ -23,11 +23,11 @@ from typing import Callable, Dict, List, Optional, Tuple, Union import torch from transformers import T5EncoderModel, T5Tokenizer -from ...callbacks import MultiPipelineCallbacks, PipelineCallback -from ...models import AutoencoderKL, LatteTransformer3DModel -from ...pipelines.pipeline_utils import DiffusionPipeline -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....callbacks import MultiPipelineCallbacks, PipelineCallback +from ....models import AutoencoderKL, LatteTransformer3DModel +from ...pipeline_utils import DiffusionPipeline +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( BACKENDS_MAPPING, BaseOutput, deprecate, @@ -37,8 +37,8 @@ from ...utils import ( logging, replace_example_docstring, ) -from ...utils.torch_utils import is_compiled_module, randn_tensor -from ...video_processor import VideoProcessor +from ....utils.torch_utils import is_compiled_module, randn_tensor +from ....video_processor import VideoProcessor if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/musicldm/__init__.py b/src/diffusers/pipelines/deprecated/musicldm/__init__.py similarity index 88% rename from src/diffusers/pipelines/musicldm/__init__.py rename to src/diffusers/pipelines/deprecated/musicldm/__init__.py index ed71eeb1d9..bc9f8d5504 100644 --- a/src/diffusers/pipelines/musicldm/__init__.py +++ b/src/diffusers/pipelines/deprecated/musicldm/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -31,7 +31,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_musicldm import MusicLDMPipeline diff --git a/src/diffusers/pipelines/musicldm/pipeline_musicldm.py 
b/src/diffusers/pipelines/deprecated/musicldm/pipeline_musicldm.py similarity index 99% rename from src/diffusers/pipelines/musicldm/pipeline_musicldm.py rename to src/diffusers/pipelines/deprecated/musicldm/pipeline_musicldm.py index 73837af7d4..b1753756ff 100644 --- a/src/diffusers/pipelines/musicldm/pipeline_musicldm.py +++ b/src/diffusers/pipelines/deprecated/musicldm/pipeline_musicldm.py @@ -26,16 +26,16 @@ from transformers import ( SpeechT5HifiGan, ) -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( is_accelerate_available, is_accelerate_version, is_librosa_available, logging, replace_example_docstring, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline, StableDiffusionMixin @@ -43,7 +43,7 @@ if is_librosa_available(): import librosa -from ...utils import is_torch_xla_available +from ....utils import is_torch_xla_available if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/paint_by_example/__init__.py b/src/diffusers/pipelines/deprecated/paint_by_example/__init__.py similarity index 89% rename from src/diffusers/pipelines/paint_by_example/__init__.py rename to src/diffusers/pipelines/deprecated/paint_by_example/__init__.py index aaa775f690..0364866ac3 100644 --- a/src/diffusers/pipelines/paint_by_example/__init__.py +++ b/src/diffusers/pipelines/deprecated/paint_by_example/__init__.py @@ -5,7 +5,7 @@ import numpy as np import PIL from PIL import Image -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -22,7 +22,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -36,7 +36,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .image_encoder import PaintByExampleImageEncoder from .pipeline_paint_by_example import PaintByExamplePipeline diff --git a/src/diffusers/pipelines/paint_by_example/image_encoder.py b/src/diffusers/pipelines/deprecated/paint_by_example/image_encoder.py similarity index 98% rename from src/diffusers/pipelines/paint_by_example/image_encoder.py rename to src/diffusers/pipelines/deprecated/paint_by_example/image_encoder.py index 2fd0338b1f..76482de4a2 100644 --- a/src/diffusers/pipelines/paint_by_example/image_encoder.py +++ b/src/diffusers/pipelines/deprecated/paint_by_example/image_encoder.py @@ -16,7 +16,7 @@ from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock -from ...utils import logging +from ....utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py 
b/src/diffusers/pipelines/deprecated/paint_by_example/pipeline_paint_by_example.py similarity index 99% rename from src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py rename to src/diffusers/pipelines/deprecated/paint_by_example/pipeline_paint_by_example.py index 288f269a65..8454948a04 100644 --- a/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +++ b/src/diffusers/pipelines/deprecated/paint_by_example/pipeline_paint_by_example.py @@ -20,11 +20,11 @@ import PIL.Image import torch from transformers import CLIPImageProcessor -from ...image_processor import VaeImageProcessor -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from ...utils import deprecate, is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ....utils import deprecate, is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker diff --git a/src/diffusers/pipelines/pia/__init__.py b/src/diffusers/pipelines/deprecated/pia/__init__.py similarity index 88% rename from src/diffusers/pipelines/pia/__init__.py rename to src/diffusers/pipelines/deprecated/pia/__init__.py index 16e8004966..8d0cae93a6 100644 --- a/src/diffusers/pipelines/pia/__init__.py +++ b/src/diffusers/pipelines/deprecated/pia/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects + from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -28,7 +28,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_pia import PIAPipeline, PIAPipelineOutput diff --git a/src/diffusers/pipelines/pia/pipeline_pia.py b/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py similarity index 98% rename from src/diffusers/pipelines/pia/pipeline_pia.py rename to src/diffusers/pipelines/deprecated/pia/pipeline_pia.py index df8499ab90..566c794b78 100644 --- a/src/diffusers/pipelines/pia/pipeline_pia.py +++ b/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py @@ -21,12 +21,17 @@ import PIL import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...image_processor import PipelineImageInput -from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel -from ...models.lora import 
adjust_lora_scale_text_encoder -from ...models.unets.unet_motion_model import MotionAdapter -from ...schedulers import ( +from ....image_processor import PipelineImageInput +from ....loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....models.unets.unet_motion_model import MotionAdapter +from ....schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, @@ -34,7 +39,7 @@ from ...schedulers import ( LMSDiscreteScheduler, PNDMScheduler, ) -from ...utils import ( +from ....utils import ( USE_PEFT_BACKEND, BaseOutput, is_torch_xla_available, @@ -43,12 +48,11 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ...video_processor import VideoProcessor +from ....utils.torch_utils import randn_tensor +from ....video_processor import VideoProcessor from ..free_init_utils import FreeInitMixin from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin - if is_torch_xla_available(): import torch_xla.core.xla_model as xm @@ -191,6 +195,8 @@ class PIAPipeline( image_encoder: CLIPVisionModelWithProjection = None, ): super().__init__() + + logger.warning(f"{self.__class__.__name__} is deprecated and will no longer be actively maintained") if isinstance(unet, UNet2DConditionModel): unet = UNetMotionModel.from_unet2d(unet, motion_adapter) diff --git a/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/__init__.py similarity index 88% rename from src/diffusers/pipelines/semantic_stable_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/semantic_stable_diffusion/__init__.py index 70f5b1a547..f55af15469 100644 --- a/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -31,7 +31,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline diff --git a/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_output.py similarity index 95% rename from src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py rename to src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_output.py index 3499129939..fcfec3ad16 100644 --- a/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py +++ 
b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_output.py @@ -4,7 +4,7 @@ from typing import List, Optional, Union import numpy as np import PIL.Image -from ...utils import BaseOutput +from ....utils import BaseOutput @dataclass diff --git a/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py similarity index 99% rename from src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py rename to src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py index a8c3742593..53f5c73014 100644 --- a/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +++ b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py @@ -5,12 +5,12 @@ from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...models import AutoencoderKL, UNet2DConditionModel +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, UNet2DConditionModel from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import deprecate, is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import SemanticStableDiffusionPipelineOutput diff --git a/src/diffusers/pipelines/shap_e/__init__.py b/src/diffusers/pipelines/deprecated/shap_e/__init__.py similarity index 91% rename from src/diffusers/pipelines/shap_e/__init__.py rename to src/diffusers/pipelines/deprecated/shap_e/__init__.py index 4ed563c4a5..c80732bf4d 100644 --- a/src/diffusers/pipelines/shap_e/__init__.py +++ b/src/diffusers/pipelines/deprecated/shap_e/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -41,7 +41,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline diff --git a/src/diffusers/pipelines/shap_e/camera.py b/src/diffusers/pipelines/deprecated/shap_e/camera.py similarity index 100% rename from src/diffusers/pipelines/shap_e/camera.py rename to src/diffusers/pipelines/deprecated/shap_e/camera.py diff --git a/src/diffusers/pipelines/shap_e/pipeline_shap_e.py b/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e.py similarity index 
98% rename from src/diffusers/pipelines/shap_e/pipeline_shap_e.py rename to src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e.py index ef8a95daef..6819107453 100644 --- a/src/diffusers/pipelines/shap_e/pipeline_shap_e.py +++ b/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e.py @@ -21,15 +21,15 @@ import PIL.Image import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer -from ...models import PriorTransformer -from ...schedulers import HeunDiscreteScheduler -from ...utils import ( +from ....models import PriorTransformer +from ....schedulers import HeunDiscreteScheduler +from ....utils import ( BaseOutput, is_torch_xla_available, logging, replace_example_docstring, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .renderer import ShapERenderer diff --git a/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py b/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e_img2img.py similarity index 98% rename from src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py rename to src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e_img2img.py index c0d1e38e09..620bde9f01 100644 --- a/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +++ b/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e_img2img.py @@ -20,15 +20,15 @@ import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPVisionModel -from ...models import PriorTransformer -from ...schedulers import HeunDiscreteScheduler -from ...utils import ( +from ....models import PriorTransformer +from ....schedulers import HeunDiscreteScheduler +from ....utils import ( BaseOutput, is_torch_xla_available, logging, replace_example_docstring, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .renderer import ShapERenderer diff --git a/src/diffusers/pipelines/shap_e/renderer.py b/src/diffusers/pipelines/deprecated/shap_e/renderer.py similarity index 99% rename from src/diffusers/pipelines/shap_e/renderer.py rename to src/diffusers/pipelines/deprecated/shap_e/renderer.py index dd25945590..2510fe870a 100644 --- a/src/diffusers/pipelines/shap_e/renderer.py +++ b/src/diffusers/pipelines/deprecated/shap_e/renderer.py @@ -23,7 +23,7 @@ from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin -from ...utils import BaseOutput +from ....utils import BaseOutput from .camera import create_pan_cameras diff --git a/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py index b8ac8e1416..242add16f3 100644 --- a/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +++ b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py @@ -28,7 +28,7 @@ from ....utils.torch_utils import randn_tensor if is_onnx_available(): from ...onnx_utils import OnnxRuntimeModel -from ...pipeline_utils import AudioPipelineOutput, DiffusionPipeline +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continuous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder diff --git a/src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py 
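Every __init__.py moved in this patch keeps the same guard-and-fallback template around its imports: probe the optional backends, register dummy placeholder objects when a backend is missing, and only expose the real pipeline classes otherwise. A stripped-down, self-contained imitation of that control flow (with a stand-in probe and placeholder class instead of diffusers' is_torch_available / _LazyModule machinery, so it runs on its own):

    from typing import TYPE_CHECKING

    class OptionalDependencyNotAvailable(Exception):
        # marker exception, mirroring the one re-exported from the utils package
        pass

    def is_torch_available() -> bool:
        # stand-in probe; the real check lives in diffusers.utils.import_utils
        try:
            import torch  # noqa: F401
            return True
        except ImportError:
            return False

    _import_structure = {"pipeline_example": ["ExamplePipeline"]}
    _dummy_objects = {}

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        class ExamplePipeline:  # placeholder, analogous to utils.dummy_torch_and_transformers_objects
            def __init__(self, *args, **kwargs):
                raise ImportError("ExamplePipeline requires torch")

        _dummy_objects["ExamplePipeline"] = ExamplePipeline
    else:
        if TYPE_CHECKING:
            from .pipeline_example import ExamplePipeline  # real import in the non-fallback branch

The real files additionally defer the else-branch imports behind a lazy module so that importing diffusers stays cheap; the sketch only shows the probe/fallback split whose utility imports every hunk in this patch re-points from ...utils to ....utils.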
b/src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/__init__.py similarity index 87% rename from src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/__init__.py index cce556fceb..2087f09ea5 100644 --- a/src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline diff --git a/src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py b/src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py index 351b146fb4..eb09b478b3 100644 --- a/src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py @@ -21,13 +21,13 @@ import torch from torch.nn import functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.attention_processor import Attention -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention_processor import Attention +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -36,7 +36,7 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker diff --git 
a/src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/__init__.py similarity index 87% rename from src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/__init__.py index e2145edb96..3924c61027 100644 --- a/src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline diff --git a/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py b/src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py index 4b999662a6..9206a392ab 100644 --- a/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py @@ -23,12 +23,12 @@ from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...configuration_utils import FrozenDict -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import DDIMInverseScheduler, KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import DDIMInverseScheduler, KarrasDiffusionSchedulers +from ....utils import ( PIL_INTERPOLATION, USE_PEFT_BACKEND, BaseOutput, @@ -39,7 +39,7 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker diff --git a/src/diffusers/pipelines/stable_diffusion_gligen/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/__init__.py similarity 
index 89% rename from src/diffusers/pipelines/stable_diffusion_gligen/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_gligen/__init__.py index 147980cbf9..81c8b8b99c 100644 --- a/src/diffusers/pipelines/stable_diffusion_gligen/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -31,7 +31,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_gligen import StableDiffusionGLIGENPipeline from .pipeline_stable_diffusion_gligen_text_image import StableDiffusionGLIGENTextImagePipeline diff --git a/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py index 4bbb93e44a..a7d64a579c 100644 --- a/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py @@ -20,13 +20,13 @@ import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.attention import GatedSelfAttentionDense -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention import GatedSelfAttentionDense +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -35,7 +35,7 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker diff --git a/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py 
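All of the import rewrites in these hunks follow from one fact: each pipeline package moves one directory deeper, from src/diffusers/pipelines/<name>/ to src/diffusers/pipelines/deprecated/<name>/, so every relative import needs exactly one more leading dot to reach the same absolute module. A minimal sketch of that resolution rule, using a hypothetical pkg.* layout rather than the real diffusers tree:

    import importlib.util

    # Hypothetical packages for illustration only.
    # Before the move, a module under pkg.pipelines.foo wrote "from ...utils import x";
    # after moving to pkg.pipelines.deprecated.foo it needs "from ....utils import x".
    print(importlib.util.resolve_name("...utils", "pkg.pipelines.foo"))              # -> pkg.utils
    print(importlib.util.resolve_name("....utils", "pkg.pipelines.deprecated.foo"))  # -> pkg.utils

Both references resolve to the same target, which is why the hunks only add a dot and leave the imported names untouched.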
similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py index 86ef017840..5423d6bca9 100644 --- a/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py @@ -26,13 +26,13 @@ from transformers import ( CLIPVisionModelWithProjection, ) -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.attention import GatedSelfAttentionDense -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention import GatedSelfAttentionDense +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, is_torch_xla_available, logging, @@ -40,7 +40,7 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.clip_image_project_model import CLIPImageProjection diff --git a/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/__init__.py similarity index 89% rename from src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/__init__.py index 7eb5bf8c22..459757a14b 100644 --- a/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -25,7 +25,7 @@ try: ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects)) else: @@ -43,7 +43,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * + from ....utils.dummy_torch_and_transformers_and_k_diffusion_objects import * else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline from .pipeline_stable_diffusion_xl_k_diffusion import StableDiffusionXLKDiffusionPipeline diff --git a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py 
b/src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py index 1f29f577f8..fd9052317d 100755 --- a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py @@ -26,22 +26,22 @@ from transformers import ( CLIPTokenizerFast, ) -from ...image_processor import VaeImageProcessor -from ...loaders import ( +from ....image_processor import VaeImageProcessor +from ....loaders import ( StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin, ) -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler -from ...utils import ( +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler +from ....utils import ( USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker @@ -122,6 +122,8 @@ class StableDiffusionKDiffusionPipeline( ): super().__init__() + logger.warning(f"{self.__class__.__name__} is deprecated and will no longer be actively maintained") + logger.info( f"{self.__class__} is an experimntal pipeline and is likely to change in the future. 
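The two-line addition in this constructor (and the matching ones added to PIAPipeline and StableDiffusionXLKDiffusionPipeline elsewhere in the patch) is plain stdlib logging: emit one warning when a deprecated pipeline is instantiated. A self-contained sketch of the pattern, with a hypothetical ExamplePipeline standing in for the real classes:

    import logging

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger(__name__)

    class ExamplePipeline:  # hypothetical stand-in for a deprecated pipeline class
        def __init__(self):
            # mirrors the warning the patch adds at the top of each deprecated __init__
            logger.warning(f"{self.__class__.__name__} is deprecated and will no longer be actively maintained")

    ExamplePipeline()  # logs: ExamplePipeline is deprecated and will no longer be actively maintained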
We recommend to use" " this pipeline for fast experimentation / iteration if needed, but advice to rely on existing pipelines" diff --git a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py b/src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py index c7c5bd9cff..776ed5bddb 100644 --- a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py @@ -25,29 +25,29 @@ from transformers import ( CLIPTokenizer, ) -from ...image_processor import VaeImageProcessor -from ...loaders import ( +from ....image_processor import VaeImageProcessor +from ....loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.attention_processor import ( +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention_processor import ( AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor, ) -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler -from ...utils import ( +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler +from ....utils import ( USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput @@ -158,6 +158,8 @@ class StableDiffusionXLKDiffusionPipeline( ): super().__init__() + logger.warning(f"{self.__class__.__name__} is deprecated and will no longer be actively maintained") + # get correct sigmas from LMS scheduler = LMSDiscreteScheduler.from_config(scheduler.config) self.register_modules( diff --git a/src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/__init__.py similarity index 87% rename from src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/__init__.py index dae2affddd..a2fcf3ab83 100644 --- a/src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline diff --git a/src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py b/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py index 702f3eda58..578523309a 100644 --- a/src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py @@ -21,12 +21,12 @@ import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...image_processor import PipelineImageInput, VaeImageProcessorLDM3D -from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import PipelineImageInput, VaeImageProcessorLDM3D +from ....loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, BaseOutput, deprecate, @@ -36,7 +36,7 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker diff --git a/src/diffusers/pipelines/stable_diffusion_panorama/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_panorama/__init__.py similarity index 87% rename from src/diffusers/pipelines/stable_diffusion_panorama/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_panorama/__init__.py index f7572db723..ce0601ed26 100644 --- a/src/diffusers/pipelines/stable_diffusion_panorama/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_panorama/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from 
....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline diff --git a/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py b/src/diffusers/pipelines/deprecated/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py index ccee6d47b4..e09433f308 100644 --- a/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py @@ -18,12 +18,12 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import DDIMScheduler -from ...utils import ( +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import DDIMScheduler +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -32,7 +32,7 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker diff --git a/src/diffusers/pipelines/stable_diffusion_safe/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/__init__.py similarity index 94% rename from src/diffusers/pipelines/stable_diffusion_safe/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_safe/__init__.py index b432b9418c..bf7e47744b 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/__init__.py @@ -6,7 +6,7 @@ import numpy as np import PIL from PIL import Image -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, BaseOutput, OptionalDependencyNotAvailable, @@ -59,7 +59,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects + from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -77,7 +77,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from 
....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_output import StableDiffusionSafePipelineOutput from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe diff --git a/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_output.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_output.py index 69a064d663..3dcea8c9f4 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_output.py @@ -4,7 +4,7 @@ from typing import List, Optional, Union import numpy as np import PIL.Image -from ...utils import ( +from ....utils import ( BaseOutput, ) diff --git a/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_stable_diffusion_safe.py similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_stable_diffusion_safe.py index deae82eb88..837670b88c 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_stable_diffusion_safe.py @@ -8,12 +8,12 @@ from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...configuration_utils import FrozenDict -from ...image_processor import PipelineImageInput -from ...loaders import IPAdapterMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import deprecate, is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor +from ....image_processor import PipelineImageInput +from ....loaders import IPAdapterMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . 
import StableDiffusionSafePipelineOutput from .safety_checker import SafeStableDiffusionSafetyChecker diff --git a/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/safety_checker.py similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_safe/safety_checker.py index 338e4c65c5..76182ecd96 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/safety_checker.py @@ -16,7 +16,7 @@ import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel -from ...utils import logging +from ....utils import logging logger = logging.get_logger(__name__) diff --git a/src/diffusers/pipelines/stable_diffusion_sag/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_sag/__init__.py similarity index 87% rename from src/diffusers/pipelines/stable_diffusion_sag/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_sag/__init__.py index 378e0e5781..8cdd1ec6bd 100644 --- a/src/diffusers/pipelines/stable_diffusion_sag/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_sag/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline diff --git a/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py b/src/diffusers/pipelines/deprecated/stable_diffusion_sag/pipeline_stable_diffusion_sag.py similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_sag/pipeline_stable_diffusion_sag.py index e96422073b..07986bc709 100644 --- a/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_sag/pipeline_stable_diffusion_sag.py @@ -19,12 +19,12 @@ import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, 
TextualInversionLoaderMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -33,7 +33,7 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py index 1752540e8f..9193932841 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py @@ -29,9 +29,9 @@ from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import DDIMScheduler from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor -from ...pipeline_utils import DiffusionPipeline -from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput -from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ..pipeline_utils import DiffusionPipeline +from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py index e9553a8d99..f19f914425 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py @@ -10,8 +10,8 @@ from ....configuration_utils import FrozenDict from ....schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from ....utils import deprecate, logging from ...onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel -from ...pipeline_utils import DiffusionPipeline -from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ..pipeline_utils import DiffusionPipeline +from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py index f9c9c37c48..3d087cf20f 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py @@ -29,9 +29,9 @@ from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import KarrasDiffusionSchedulers from ....utils 
import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor -from ...pipeline_utils import DiffusionPipeline -from ...stable_diffusion import StableDiffusionPipelineOutput -from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ..pipeline_utils import DiffusionPipeline +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py index 06db871daf..125a0c6338 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py @@ -26,9 +26,9 @@ from ....schedulers import PNDMScheduler from ....schedulers.scheduling_utils import SchedulerMixin from ....utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor -from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin -from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput -from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py index d486a32f6a..98d78d2909 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py @@ -32,9 +32,9 @@ from ....utils import ( unscale_lora_layers, ) from ....utils.torch_utils import randn_tensor -from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin -from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput -from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py index 509f256209..b635211e92 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -46,9 +46,9 @@ from ....utils import ( unscale_lora_layers, ) from ....utils.torch_utils import randn_tensor -from ...pipeline_utils import 
DiffusionPipeline, StableDiffusionMixin -from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput -from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/text_to_video_synthesis/__init__.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/__init__.py similarity index 90% rename from src/diffusers/pipelines/text_to_video_synthesis/__init__.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/__init__.py index 8d8fdb9276..6c32f5f16c 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/__init__.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -33,7 +33,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + from ....utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_output import TextToVideoSDPipelineOutput from .pipeline_text_to_video_synth import TextToVideoSDPipeline diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_output.py similarity index 96% rename from src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_output.py index 040bf0efba..e223bcebf9 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_output.py @@ -5,7 +5,7 @@ import numpy as np import PIL import torch -from ...utils import ( +from ....utils import ( BaseOutput, ) diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth.py similarity index 98% rename from src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth.py index 5c63d66e31..379c1ca8c6 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth.py @@ -18,11 +18,11 @@ from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from 
...models import AutoencoderKL, UNet3DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet3DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -31,8 +31,8 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ...video_processor import VideoProcessor +from ....utils.torch_utils import randn_tensor +from ....video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . import TextToVideoSDPipelineOutput diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py similarity index 98% rename from src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py index 006c7a79ce..63ba18e476 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py @@ -19,11 +19,11 @@ import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet3DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet3DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -32,8 +32,8 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ...video_processor import VideoProcessor +from ....utils.torch_utils import randn_tensor +from ....video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . 
import TextToVideoSDPipelineOutput diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero.py similarity index 99% rename from src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero.py index df85f470a8..6a67f4c480 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero.py @@ -10,12 +10,12 @@ import torch.nn.functional as F from torch.nn.functional import grid_sample from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, BaseOutput, is_torch_xla_available, @@ -23,7 +23,7 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionSafetyChecker diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py similarity index 99% rename from src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py index 339d5b3a60..5581f80d35 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py @@ -16,17 +16,17 @@ from transformers import ( CLIPVisionModelWithProjection, ) -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.attention_processor import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention_processor import ( AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor, ) -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, BaseOutput, is_invisible_watermark_available, @@ -34,7 +34,7 @@ from ...utils import ( 
scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin @@ -42,7 +42,7 @@ if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker -from ...utils import is_torch_xla_available +from ....utils import is_torch_xla_available if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/unclip/__init__.py b/src/diffusers/pipelines/deprecated/unclip/__init__.py similarity index 87% rename from src/diffusers/pipelines/unclip/__init__.py rename to src/diffusers/pipelines/deprecated/unclip/__init__.py index c89e899463..7444df4912 100644 --- a/src/diffusers/pipelines/unclip/__init__.py +++ b/src/diffusers/pipelines/deprecated/unclip/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline + from ....utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline _dummy_objects.update( {"UnCLIPImageVariationPipeline": UnCLIPImageVariationPipeline, "UnCLIPPipeline": UnCLIPPipeline} @@ -33,7 +33,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + from ....utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline diff --git a/src/diffusers/pipelines/unclip/pipeline_unclip.py b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip.py similarity index 98% rename from src/diffusers/pipelines/unclip/pipeline_unclip.py rename to src/diffusers/pipelines/deprecated/unclip/pipeline_unclip.py index bf42d44f74..c5f84291f4 100644 --- a/src/diffusers/pipelines/unclip/pipeline_unclip.py +++ b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip.py @@ -20,11 +20,11 @@ from torch.nn import functional as F from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput -from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel -from ...schedulers import UnCLIPScheduler -from ...utils import is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....models import PriorTransformer, UNet2DConditionModel, UNet2DModel +from ....schedulers import UnCLIPScheduler +from ....utils import is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_proj import UnCLIPTextProjModel diff --git a/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py similarity index 98% rename from 
src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py rename to src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py index 8fa0a848f7..1e1787362e 100644 --- a/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +++ b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py @@ -25,11 +25,11 @@ from transformers import ( CLIPVisionModelWithProjection, ) -from ...models import UNet2DConditionModel, UNet2DModel -from ...schedulers import UnCLIPScheduler -from ...utils import is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....models import UNet2DConditionModel, UNet2DModel +from ....schedulers import UnCLIPScheduler +from ....utils import is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_proj import UnCLIPTextProjModel diff --git a/src/diffusers/pipelines/unclip/text_proj.py b/src/diffusers/pipelines/deprecated/unclip/text_proj.py similarity index 100% rename from src/diffusers/pipelines/unclip/text_proj.py rename to src/diffusers/pipelines/deprecated/unclip/text_proj.py diff --git a/src/diffusers/pipelines/unidiffuser/__init__.py b/src/diffusers/pipelines/deprecated/unidiffuser/__init__.py similarity index 91% rename from src/diffusers/pipelines/unidiffuser/__init__.py rename to src/diffusers/pipelines/deprecated/unidiffuser/__init__.py index 1ac2b09a6e..aeaba167dc 100644 --- a/src/diffusers/pipelines/unidiffuser/__init__.py +++ b/src/diffusers/pipelines/deprecated/unidiffuser/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -16,7 +16,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) @@ -35,7 +35,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) diff --git a/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py b/src/diffusers/pipelines/deprecated/unidiffuser/modeling_text_decoder.py similarity index 100% rename from src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py rename to src/diffusers/pipelines/deprecated/unidiffuser/modeling_text_decoder.py diff --git a/src/diffusers/pipelines/unidiffuser/modeling_uvit.py b/src/diffusers/pipelines/deprecated/unidiffuser/modeling_uvit.py similarity index 99% rename from src/diffusers/pipelines/unidiffuser/modeling_uvit.py rename to src/diffusers/pipelines/deprecated/unidiffuser/modeling_uvit.py index 1e285a9670..0b3175e2f2 100644 --- a/src/diffusers/pipelines/unidiffuser/modeling_uvit.py +++ b/src/diffusers/pipelines/deprecated/unidiffuser/modeling_uvit.py @@ -11,7 +11,7 @@ from ...models.attention_processor import Attention from ...models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed from ...models.modeling_outputs import 
Transformer2DModelOutput from ...models.normalization import AdaLayerNorm -from ...utils import logging +from ....utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py b/src/diffusers/pipelines/deprecated/unidiffuser/pipeline_unidiffuser.py similarity index 98% rename from src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py rename to src/diffusers/pipelines/deprecated/unidiffuser/pipeline_unidiffuser.py index 66d7404fb9..40d30e9442 100644 --- a/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +++ b/src/diffusers/pipelines/deprecated/unidiffuser/pipeline_unidiffuser.py @@ -13,12 +13,12 @@ from transformers import ( GPT2Tokenizer, ) -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -26,8 +26,8 @@ from ...utils import ( scale_lora_layers, unscale_lora_layers, ) -from ...utils.outputs import BaseOutput -from ...utils.torch_utils import randn_tensor +from ....utils.outputs import BaseOutput +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel @@ -225,7 +225,7 @@ class UniDiffuserPipeline(DiffusionPipeline): return mode - # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_slicing + # Copied from ..pipeline_utils.StableDiffusionMixin.enable_vae_slicing def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to @@ -233,7 +233,7 @@ class UniDiffuserPipeline(DiffusionPipeline): """ self.vae.enable_slicing() - # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing + # Copied from ..pipeline_utils.StableDiffusionMixin.disable_vae_slicing def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to @@ -241,7 +241,7 @@ class UniDiffuserPipeline(DiffusionPipeline): """ self.vae.disable_slicing() - # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_tiling + # Copied from ..pipeline_utils.StableDiffusionMixin.enable_vae_tiling def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to @@ -250,7 +250,7 @@ class UniDiffuserPipeline(DiffusionPipeline): """ self.vae.enable_tiling() - # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_tiling + # Copied from ..pipeline_utils.StableDiffusionMixin.disable_vae_tiling def disable_vae_tiling(self): r""" Disable tiled VAE decoding. 
If `enable_vae_tiling` was previously enabled, this method will go back to diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py index 4fb437958a..126dddd253 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py @@ -8,7 +8,7 @@ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPV from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers from ....utils import logging -from ...pipeline_utils import DiffusionPipeline +from ..pipeline_utils import DiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline diff --git a/src/diffusers/pipelines/wuerstchen/__init__.py b/src/diffusers/pipelines/deprecated/wuerstchen/__init__.py similarity index 91% rename from src/diffusers/pipelines/wuerstchen/__init__.py rename to src/diffusers/pipelines/deprecated/wuerstchen/__init__.py index ddb852d193..26f259512c 100644 --- a/src/diffusers/pipelines/wuerstchen/__init__.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects + from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -34,7 +34,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + from ....utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt diff --git a/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_paella_vq_model.py similarity index 99% rename from src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py rename to src/diffusers/pipelines/deprecated/wuerstchen/modeling_paella_vq_model.py index b2cf8cbc97..ab8d259dc1 100644 --- a/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_paella_vq_model.py @@ -22,7 +22,7 @@ from ...configuration_utils import ConfigMixin, register_to_config from ...models.autoencoders.vae import DecoderOutput, VectorQuantizer from ...models.modeling_utils import ModelMixin from ...models.vq_model import VQEncoderOutput -from ...utils.accelerate_utils import apply_forward_hook +from ....utils.accelerate_utils import apply_forward_hook class MixingResidualBlock(nn.Module): diff --git a/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py 
b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_common.py similarity index 100% rename from src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py rename to src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_common.py diff --git a/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_diffnext.py similarity index 100% rename from src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py rename to src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_diffnext.py diff --git a/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_prior.py similarity index 100% rename from src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py rename to src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_prior.py diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py similarity index 98% rename from src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py rename to src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py index edc01f0d5c..43861bb9b6 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py @@ -18,10 +18,10 @@ import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...schedulers import DDPMWuerstchenScheduler -from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....schedulers import DDPMWuerstchenScheduler +from ....utils import deprecate, is_torch_xla_available, logging, replace_example_docstring +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_combined.py similarity index 99% rename from src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py rename to src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_combined.py index 7819c8c0a0..4d1bc7abf5 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_combined.py @@ -16,8 +16,8 @@ from typing import Callable, Dict, List, Optional, Union import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...schedulers import DDPMWuerstchenScheduler -from ...utils import deprecate, replace_example_docstring +from ....schedulers import DDPMWuerstchenScheduler +from ....utils import deprecate, replace_example_docstring from ..pipeline_utils import DiffusionPipeline from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py similarity index 98% rename from src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py rename to 
src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py index 8f6ba41972..7cef8a3be0 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py @@ -20,10 +20,10 @@ import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...loaders import StableDiffusionLoraLoaderMixin -from ...schedulers import DDPMWuerstchenScheduler -from ...utils import BaseOutput, deprecate, is_torch_xla_available, logging, replace_example_docstring -from ...utils.torch_utils import randn_tensor +from ....loaders import StableDiffusionLoraLoaderMixin +from ....schedulers import DDPMWuerstchenScheduler +from ....utils import BaseOutput, deprecate, is_torch_xla_available, logging, replace_example_docstring +from ....utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .modeling_wuerstchen_prior import WuerstchenPrior
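
Note on the renames above: because each pipeline package moves one directory deeper (src/diffusers/pipelines/<name>/ to src/diffusers/pipelines/deprecated/<name>/), every relative import that resolves to the top-level diffusers package gains one leading dot (...utils becomes ....utils, ...models becomes ....models, and so on), while same-folder imports such as .text_proj or .modeling_wuerstchen_prior are untouched. The public import paths are meant to stay stable through the lazy-import tables in pipelines/__init__.py and deprecated/__init__.py. A minimal smoke test a reviewer could run against a checkout with this patch applied (assuming these classes remain top-level exports, which this hunk does not itself show) is sketched below:

# Illustrative check only: it resolves the moved pipeline classes without loading any weights.
import diffusers

for name in (
    "StableDiffusionPanoramaPipeline",
    "StableDiffusionPipelineSafe",
    "StableDiffusionSAGPipeline",
    "TextToVideoSDPipeline",
    "UnCLIPPipeline",
    "UniDiffuserPipeline",
    "WuerstchenPriorPipeline",
):
    # _LazyModule resolves the attribute on first access, so hasattr() exercises the new paths.
    assert hasattr(diffusers, name), f"{name} is no longer importable from the top-level package"
    print(name, "->", getattr(diffusers, name).__module__)

If the restructuring is wired up correctly, the printed __module__ values for these classes should now point under diffusers.pipelines.deprecated.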