Mirror of https://github.com/huggingface/diffusers.git
Commit 1a25d54917 by DN6, 2025-04-21 17:18:51 +05:30 (parent 53efae0af2)
8 changed files with 14 additions and 8 deletions
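
The documentation hunks below retarget `[[autodoc]]` references from the pipelines' original module paths to the `pipelines.deprecated` namespace, while the two `__init__.py` hunks keep the affected classes importable from the package root. A minimal sketch of what the retargeted paths imply for imports, assuming the modules are laid out exactly as the updated references indicate (paths are copied from the hunks below, not verified against the branch):

```python
# Hedged sketch: module paths are copied from the updated [[autodoc]] references
# in this commit and assume the pipeline code was moved to match them.

# Pre-commit, the docs pointed at the original module path, e.g.:
#   diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor

# Post-commit, the same class is documented from the deprecated namespace:
from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero import (
    CrossFrameAttnProcessor,
)

# The pipelines also stay importable from the package root, since the top-level
# __init__.py hunk below still lists the text-to-video and UniDiffuser exports:
from diffusers import TextToVideoZeroPipeline
```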

View File

@@ -46,7 +46,7 @@ An attention processor is a class for applying different types of attention mech
## CrossFrameAttnProcessor
-[[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor
+[[autodoc]] pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor
## Custom Diffusion
@@ -163,4 +163,4 @@ An attention processor is a class for applying different types of attention mech
## XLAFluxFlashAttnProcessor2_0
-[[autodoc]] models.attention_processor.XLAFluxFlashAttnProcessor2_0
+[[autodoc]] models.attention_processor.XLAFluxFlashAttnProcessor2_0

View File

@@ -55,4 +55,4 @@ Sample output with I2VGenXL:
- __call__
## I2VGenXLPipelineOutput
-[[autodoc]] pipelines.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput
+[[autodoc]] pipelines.deprecated.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput

View File

@@ -31,5 +31,5 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers)
- __call__
## SemanticStableDiffusionPipelineOutput
-[[autodoc]] pipelines.semantic_stable_diffusion.pipeline_output.SemanticStableDiffusionPipelineOutput
+[[autodoc]] pipelines.deprecated.semantic_stable_diffusion.pipeline_output.SemanticStableDiffusionPipelineOutput
- all

View File

@@ -34,4 +34,4 @@ See the [reuse components across pipelines](../../using-diffusers/loading#reuse-
- __call__
## ShapEPipelineOutput
-[[autodoc]] pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput
+[[autodoc]] pipelines.deprecated.shap_e.pipeline_shap_e.ShapEPipelineOutput

View File

@@ -194,4 +194,4 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers)
- __call__
## TextToVideoSDPipelineOutput
-[[autodoc]] pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput
+[[autodoc]] pipelines.deprecated.text_to_video_synthesis.TextToVideoSDPipelineOutput

View File

@@ -145,7 +145,7 @@ The original codebase, as well as experimental ideas, can be found at [dome272/W
## WuerstchenPriorPipelineOutput
-[[autodoc]] pipelines.wuerstchen.pipeline_wuerstchen_prior.WuerstchenPriorPipelineOutput
+[[autodoc]] pipelines.deprecated.wuerstchen.pipeline_wuerstchen_prior.WuerstchenPriorPipelineOutput
## WuerstchenDecoderPipeline

View File

@@ -578,8 +578,14 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionXLControlNetXSPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
TextToVideoZeroSDXLPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
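
The hunk above sits in diffusers' top-level `__init__.py`, inside the `if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:` branch: names listed there are imported eagerly only for type checkers or when slow imports are forced, and are otherwise resolved lazily. A minimal, generic sketch of that pattern (illustrative only, not diffusers' actual implementation; the `_import_structure` dict and `__getattr__` hook here are assumptions about how such a layout is typically wired):

```python
# Simplified lazy-import sketch for a package __init__.py; the names come from
# the hunk above, the wiring itself is illustrative.
import importlib
import os
from typing import TYPE_CHECKING

SLOW_IMPORT = os.environ.get("DIFFUSERS_SLOW_IMPORT", "0") == "1"

# Submodule -> names re-exported from the package root.
_import_structure = {
    "pipelines": [
        "TextToVideoSDPipeline",
        "UniDiffuserModel",
        "UniDiffuserTextDecoder",
    ],
}

if TYPE_CHECKING or SLOW_IMPORT:
    # Eager imports: visible to static type checkers, and used when the
    # environment forces slow (non-lazy) imports.
    from .pipelines import (  # noqa: F401
        TextToVideoSDPipeline,
        UniDiffuserModel,
        UniDiffuserTextDecoder,
    )
else:
    def __getattr__(name):
        # PEP 562: resolve the attribute lazily on first access.
        for submodule, names in _import_structure.items():
            if name in names:
                module = importlib.import_module(f".{submodule}", __name__)
                return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

This is also why a class such as `UniDiffuserModel` has to be re-exported from `pipelines/__init__.py` as well, which is what the last hunk in this commit does.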

View File

@@ -222,7 +222,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
VideoToVideoSDPipeline,
)
from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline
-from .unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
+from .unidiffuser import ImageTextPipelineOutput, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder
from .versatile_diffusion import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,