From 1a25d5491723bcd626ec1fc8acd10f880177ecce Mon Sep 17 00:00:00 2001
From: DN6
Date: Mon, 21 Apr 2025 17:18:51 +0530
Subject: [PATCH] update

---
 docs/source/en/api/attnprocessor.md                       | 4 ++--
 docs/source/en/api/pipelines/i2vgenxl.md                  | 2 +-
 docs/source/en/api/pipelines/semantic_stable_diffusion.md | 2 +-
 docs/source/en/api/pipelines/shap_e.md                    | 2 +-
 docs/source/en/api/pipelines/text_to_video.md             | 2 +-
 docs/source/en/api/pipelines/wuerstchen.md                | 2 +-
 src/diffusers/pipelines/__init__.py                       | 6 ++++++
 src/diffusers/pipelines/deprecated/__init__.py            | 2 +-
 8 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md
index 638ecb973e..e9c542da8c 100644
--- a/docs/source/en/api/attnprocessor.md
+++ b/docs/source/en/api/attnprocessor.md
@@ -46,7 +46,7 @@ An attention processor is a class for applying different types of attention mech
 
 ## CrossFrameAttnProcessor
 
-[[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor
+[[autodoc]] pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor
 
 ## Custom Diffusion
 
@@ -163,4 +163,4 @@ An attention processor is a class for applying different types of attention mech
 
 ## XLAFluxFlashAttnProcessor2_0
 
-[[autodoc]] models.attention_processor.XLAFluxFlashAttnProcessor2_0
\ No newline at end of file
+[[autodoc]] models.attention_processor.XLAFluxFlashAttnProcessor2_0
diff --git a/docs/source/en/api/pipelines/i2vgenxl.md b/docs/source/en/api/pipelines/i2vgenxl.md
index 3994f91d2c..00d31d31e7 100644
--- a/docs/source/en/api/pipelines/i2vgenxl.md
+++ b/docs/source/en/api/pipelines/i2vgenxl.md
@@ -55,4 +55,4 @@ Sample output with I2VGenXL:
 	- __call__
 
 ## I2VGenXLPipelineOutput
-[[autodoc]] pipelines.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput
\ No newline at end of file
+[[autodoc]] pipelines.deprecated.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput
diff --git a/docs/source/en/api/pipelines/semantic_stable_diffusion.md b/docs/source/en/api/pipelines/semantic_stable_diffusion.md
index b9aacd3518..4e8e5918d9 100644
--- a/docs/source/en/api/pipelines/semantic_stable_diffusion.md
+++ b/docs/source/en/api/pipelines/semantic_stable_diffusion.md
@@ -31,5 +31,5 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers)
 	- __call__
 
 ## SemanticStableDiffusionPipelineOutput
-[[autodoc]] pipelines.semantic_stable_diffusion.pipeline_output.SemanticStableDiffusionPipelineOutput
+[[autodoc]] pipelines.deprecated.semantic_stable_diffusion.pipeline_output.SemanticStableDiffusionPipelineOutput
 	- all
diff --git a/docs/source/en/api/pipelines/shap_e.md b/docs/source/en/api/pipelines/shap_e.md
index 3c1f939c1f..fb8f38d98a 100644
--- a/docs/source/en/api/pipelines/shap_e.md
+++ b/docs/source/en/api/pipelines/shap_e.md
@@ -34,4 +34,4 @@ See the [reuse components across pipelines](../../using-diffusers/loading#reuse-
 	- __call__
 
 ## ShapEPipelineOutput
-[[autodoc]] pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput
+[[autodoc]] pipelines.deprecated.shap_e.pipeline_shap_e.ShapEPipelineOutput
diff --git a/docs/source/en/api/pipelines/text_to_video.md b/docs/source/en/api/pipelines/text_to_video.md
index 5eb1dd1a9d..7c301bf38c 100644
--- a/docs/source/en/api/pipelines/text_to_video.md
+++ b/docs/source/en/api/pipelines/text_to_video.md
@@ -194,4 +194,4 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers)
 	- __call__
 
 ## TextToVideoSDPipelineOutput
-[[autodoc]] pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput
+[[autodoc]] pipelines.deprecated.text_to_video_synthesis.TextToVideoSDPipelineOutput
diff --git a/docs/source/en/api/pipelines/wuerstchen.md b/docs/source/en/api/pipelines/wuerstchen.md
index da6ef2cffc..788433fdbd 100644
--- a/docs/source/en/api/pipelines/wuerstchen.md
+++ b/docs/source/en/api/pipelines/wuerstchen.md
@@ -145,7 +145,7 @@ The original codebase, as well as experimental ideas, can be found at [dome272/W
 
 ## WuerstchenPriorPipelineOutput
 
-[[autodoc]] pipelines.wuerstchen.pipeline_wuerstchen_prior.WuerstchenPriorPipelineOutput
+[[autodoc]] pipelines.deprecated.wuerstchen.pipeline_wuerstchen_prior.WuerstchenPriorPipelineOutput
 
 ## WuerstchenDecoderPipeline
 
diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py
index 2e9e8f925d..f78cd34a66 100644
--- a/src/diffusers/pipelines/__init__.py
+++ b/src/diffusers/pipelines/__init__.py
@@ -578,8 +578,14 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
             StableDiffusionPix2PixZeroPipeline,
             StableDiffusionSAGPipeline,
             StableDiffusionXLControlNetXSPipeline,
+            TextToVideoSDPipeline,
+            TextToVideoZeroPipeline,
+            TextToVideoZeroSDXLPipeline,
             UnCLIPImageVariationPipeline,
             UnCLIPPipeline,
+            UniDiffuserModel,
+            UniDiffuserPipeline,
+            UniDiffuserTextDecoder,
             VersatileDiffusionDualGuidedPipeline,
             VersatileDiffusionImageVariationPipeline,
             VersatileDiffusionPipeline,
diff --git a/src/diffusers/pipelines/deprecated/__init__.py b/src/diffusers/pipelines/deprecated/__init__.py
index 04c76a15e8..0097bf3ff5 100644
--- a/src/diffusers/pipelines/deprecated/__init__.py
+++ b/src/diffusers/pipelines/deprecated/__init__.py
@@ -222,7 +222,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
             VideoToVideoSDPipeline,
         )
         from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline
-        from .unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
+        from .unidiffuser import ImageTextPipelineOutput, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder
         from .versatile_diffusion import (
             VersatileDiffusionDualGuidedPipeline,
             VersatileDiffusionImageVariationPipeline,
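
A quick sanity check of the re-exports touched above (a minimal sketch, not part of the patch; it assumes a diffusers checkout containing this change and that the runtime `_import_structure` tables mirror the type-checking imports shown in the hunks):

```python
# Hedged sketch: verify that the deprecated pipelines re-exported by this patch
# still resolve through the lazy-import machinery. Assumes the runtime
# _import_structure entries in both __init__.py files list these names.
from diffusers.pipelines import TextToVideoSDPipeline, UniDiffuserPipeline
from diffusers.pipelines.deprecated import UniDiffuserModel, UniDiffuserTextDecoder

# The classes should now report modules under the deprecated subpackage.
print(TextToVideoSDPipeline.__module__)
print(UniDiffuserModel.__module__)
```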