Mirror of https://github.com/huggingface/diffusers.git (synced 2026-01-29 07:22:12 +03:00)

Commit message: update
@@ -46,7 +46,7 @@ An attention processor is a class for applying different types of attention mech

## CrossFrameAttnProcessor

-[[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor
+[[autodoc]] pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor

## Custom Diffusion
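The path change above reflects the Text-to-Video-Zero module moving under `pipelines.deprecated`. A rough sketch of what the relocated import looks like in use (the checkpoint id and `batch_size` value are illustrative; the module path simply follows the updated autodoc directive):

```python
import torch
from diffusers import StableDiffusionPipeline
from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero import (
    CrossFrameAttnProcessor,
)

# Illustrative checkpoint; any Stable Diffusion UNet accepts a custom attention processor.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Replace every attention processor in the UNet with the cross-frame variant,
# which attends to a fixed anchor frame to keep generated video frames consistent.
pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))
```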
@@ -163,4 +163,4 @@ An attention processor is a class for applying different types of attention mech

## XLAFluxFlashAttnProcessor2_0

-[[autodoc]] models.attention_processor.XLAFluxFlashAttnProcessor2_0
+[[autodoc]] models.attention_processor.XLAFluxFlashAttnProcessor2_0
@@ -55,4 +55,4 @@ Sample output with I2VGenXL:
	- __call__

## I2VGenXLPipelineOutput
-[[autodoc]] pipelines.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput
+[[autodoc]] pipelines.deprecated.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput
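Only the autodoc path moves here; calling the pipeline is unchanged. A minimal sketch of producing the "sample output" the surrounding section refers to (checkpoint id, image URL, and prompt are illustrative):

```python
import torch
from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_gif, load_image

# Illustrative checkpoint; the fp16 variant keeps memory manageable on a single GPU.
pipe = I2VGenXLPipeline.from_pretrained(
    "ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

# Conditioning image for image-to-video generation (URL is illustrative).
image = load_image(
    "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/i2vgen_xl_images/img_0009.png"
)
output = pipe(
    prompt="Papers were floating in the air on a table in the library",
    image=image,
    num_inference_steps=50,
    generator=torch.manual_seed(8888),
)
# `output` is an I2VGenXLPipelineOutput; frames[0] is the frame list for the first video.
export_to_gif(output.frames[0], "i2v.gif")
```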
@@ -31,5 +31,5 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers)
	- __call__

## SemanticStableDiffusionPipelineOutput
-[[autodoc]] pipelines.semantic_stable_diffusion.pipeline_output.SemanticStableDiffusionPipelineOutput
+[[autodoc]] pipelines.deprecated.semantic_stable_diffusion.pipeline_output.SemanticStableDiffusionPipelineOutput
	- all
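For context on the output class being re-pathed here, a hedged sketch of a SEGA-style call that returns it (checkpoint id and editing parameters are illustrative and follow the usual semantic-guidance examples):

```python
import torch
from diffusers import SemanticStableDiffusionPipeline

# Illustrative checkpoint; semantic guidance runs on top of a regular Stable Diffusion model.
pipe = SemanticStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

out = pipe(
    prompt="a photo of the face of a woman",
    editing_prompt=["smiling, smile"],   # concept to steer towards
    reverse_editing_direction=[False],   # False = push towards the concept, True = away
    edit_guidance_scale=[6],
    edit_warmup_steps=[10],
    generator=torch.manual_seed(0),
)
# `out` is a SemanticStableDiffusionPipelineOutput with `images` and `nsfw_content_detected`.
image = out.images[0]
```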
@@ -34,4 +34,4 @@ See the [reuse components across pipelines](../../using-diffusers/loading#reuse-
	- __call__

## ShapEPipelineOutput
-[[autodoc]] pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput
+[[autodoc]] pipelines.deprecated.shap_e.pipeline_shap_e.ShapEPipelineOutput
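Again only the documented path changes. A short sketch of the call that yields a `ShapEPipelineOutput` (checkpoint id, prompt, and output filename are illustrative):

```python
import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

# Illustrative checkpoint id from the Shap-E release.
pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")

output = pipe(
    "a firecracker",
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=256,
)
# `output` is a ShapEPipelineOutput; `images` holds one list of rendered frames per prompt.
export_to_gif(output.images[0], "firecracker_3d.gif")
```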
@@ -194,4 +194,4 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers)
	- __call__

## TextToVideoSDPipelineOutput
-[[autodoc]] pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput
+[[autodoc]] pipelines.deprecated.text_to_video_synthesis.TextToVideoSDPipelineOutput
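For reference, a minimal sketch of a call returning the `TextToVideoSDPipelineOutput` documented above (checkpoint id and prompt are illustrative, following the usual ModelScope text-to-video example):

```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

# Illustrative checkpoint; the ModelScope text-to-video weights load through DiffusionPipeline.
pipe = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

output = pipe("Spiderman is surfing", num_inference_steps=25)
# `output` is a TextToVideoSDPipelineOutput; frames[0] is the generated clip as a frame sequence.
video_path = export_to_video(output.frames[0])
```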
@@ -145,7 +145,7 @@ The original codebase, as well as experimental ideas, can be found at [dome272/W

## WuerstchenPriorPipelineOutput

-[[autodoc]] pipelines.wuerstchen.pipeline_wuerstchen_prior.WuerstchenPriorPipelineOutput
+[[autodoc]] pipelines.deprecated.wuerstchen.pipeline_wuerstchen_prior.WuerstchenPriorPipelineOutput

## WuerstchenDecoderPipeline
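To place the two classes touched here, a hedged sketch of the Würstchen prior-then-decoder flow, where the prior's output is the `WuerstchenPriorPipelineOutput` being re-pathed (checkpoint ids and prompt are illustrative):

```python
import torch
from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline

# Illustrative checkpoint ids from the Würstchen release.
prior = WuerstchenPriorPipeline.from_pretrained(
    "warp-ai/wuerstchen-prior", torch_dtype=torch.float16
).to("cuda")
decoder = WuerstchenDecoderPipeline.from_pretrained(
    "warp-ai/wuerstchen", torch_dtype=torch.float16
).to("cuda")

prompt = "an astronaut riding a horse, detailed oil painting"
# The prior returns a WuerstchenPriorPipelineOutput carrying image embeddings ...
prior_output = prior(prompt=prompt, guidance_scale=4.0)
# ... which the decoder pipeline turns into the final image.
image = decoder(
    image_embeddings=prior_output.image_embeddings,
    prompt=prompt,
    guidance_scale=0.0,
).images[0]
```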
@@ -578,8 +578,14 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
            StableDiffusionPix2PixZeroPipeline,
            StableDiffusionSAGPipeline,
            StableDiffusionXLControlNetXSPipeline,
            TextToVideoSDPipeline,
            TextToVideoZeroPipeline,
            TextToVideoZeroSDXLPipeline,
            UnCLIPImageVariationPipeline,
            UnCLIPPipeline,
            UniDiffuserModel,
            UniDiffuserPipeline,
            UniDiffuserTextDecoder,
            VersatileDiffusionDualGuidedPipeline,
            VersatileDiffusionImageVariationPipeline,
            VersatileDiffusionPipeline,
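For readers unfamiliar with this block: names listed under `if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:` are only imported eagerly for type checkers or when slow imports are explicitly requested; at runtime the package swaps itself for a lazy proxy. A simplified sketch of the pattern this hunk edits (the real `src/diffusers/__init__.py` additionally wraps these imports in optional-dependency try/except guards):

```python
# Simplified sketch of the lazy-import layout in src/diffusers/__init__.py.
import sys
from typing import TYPE_CHECKING

from .utils import DIFFUSERS_SLOW_IMPORT, _LazyModule

# Maps submodules to the names they export; used to resolve attributes lazily.
_import_structure = {
    "pipelines": ["TextToVideoSDPipeline", "UniDiffuserModel", "UniDiffuserPipeline"],
}

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    # Eager imports, taken only by static type checkers or when the user opts in,
    # so every name in the hunk above stays visible to tooling.
    from .pipelines import TextToVideoSDPipeline, UniDiffuserModel, UniDiffuserPipeline
else:
    # At runtime the module object is replaced by a proxy that imports submodules
    # on first attribute access, keeping `import diffusers` fast.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
```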
@@ -222,7 +222,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
            VideoToVideoSDPipeline,
        )
        from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline
-        from .unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
+        from .unidiffuser import ImageTextPipelineOutput, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder
        from .versatile_diffusion import (
            VersatileDiffusionDualGuidedPipeline,
            VersatileDiffusionImageVariationPipeline,
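The widened import makes the UniDiffuser submodules addressable alongside the pipeline rather than only as its internals. A rough sketch of what that enables (checkpoint id is illustrative, and the import path mirrors this hunk in `pipelines/__init__.py`):

```python
from diffusers.pipelines import UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder

# Illustrative checkpoint from the UniDiffuser release.
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")

# The joint image/text transformer and the GPT2-based text decoder are now
# importable components in their own right.
assert isinstance(pipe.unet, UniDiffuserModel)
assert isinstance(pipe.text_decoder, UniDiffuserTextDecoder)
```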