Mirror of https://github.com/huggingface/diffusers.git, synced 2026-01-29 07:22:12 +03:00
amused rename

- Update docs/source/en/api/pipelines/amused.md (Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>)
- AdaLayerNormContinuous default values
- custom micro conditioning
- micro conditioning docs
- put lookup from codebook in constructor
- fix conversion script
- remove manual fused flash attn kernel
- add training script
- temp remove training script
- add dummy gradient checkpointing func
- clarify temperatures is an instance variable by setting it
- remove additional SkipFF block args
- hardcode norm args
- rename tests folder
- fix paths and samples
- fix tests
- add training script
- training readme
- lora saving and loading
- non-lora saving/loading
- some readme fixes
- guards
- Update docs/source/en/api/pipelines/amused.md (Co-authored-by: Suraj Patil <surajp815@gmail.com>)
- Update examples/amused/README.md (Co-authored-by: Suraj Patil <surajp815@gmail.com>)
- Update examples/amused/train_amused.py (Co-authored-by: Suraj Patil <surajp815@gmail.com>)
- vae upcasting
- add fp16 integration tests
- use tuple for micro cond
- copyrights
- remove casts
- delegate to torch.nn.LayerNorm
- move temperature to pipeline call
- upsampling/downsampling changes
97 lines · 4.3 KiB · Python
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ..utils import (
    DIFFUSERS_SLOW_IMPORT,
    _LazyModule,
    is_flax_available,
    is_torch_available,
)

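# Maps each submodule to the public names it exposes; `_LazyModule` (at the bottom of
# this file) uses this mapping to defer imports until a name is first accessed.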
_import_structure = {}

if is_torch_available():
    _import_structure["adapter"] = ["MultiAdapter", "T2IAdapter"]
    _import_structure["autoencoders.autoencoder_asym_kl"] = ["AsymmetricAutoencoderKL"]
    _import_structure["autoencoders.autoencoder_kl"] = ["AutoencoderKL"]
    _import_structure["autoencoders.autoencoder_kl_temporal_decoder"] = ["AutoencoderKLTemporalDecoder"]
    _import_structure["autoencoders.autoencoder_tiny"] = ["AutoencoderTiny"]
    _import_structure["autoencoders.consistency_decoder_vae"] = ["ConsistencyDecoderVAE"]
    _import_structure["controlnet"] = ["ControlNetModel"]
    _import_structure["controlnetxs"] = ["ControlNetXSModel"]
    _import_structure["dual_transformer_2d"] = ["DualTransformer2DModel"]
    _import_structure["embeddings"] = ["ImageProjection"]
    _import_structure["modeling_utils"] = ["ModelMixin"]
    _import_structure["prior_transformer"] = ["PriorTransformer"]
    _import_structure["t5_film_transformer"] = ["T5FilmDecoder"]
    _import_structure["transformer_2d"] = ["Transformer2DModel"]
    _import_structure["transformer_temporal"] = ["TransformerTemporalModel"]
    _import_structure["unet_1d"] = ["UNet1DModel"]
    _import_structure["unet_2d"] = ["UNet2DModel"]
    _import_structure["unet_2d_condition"] = ["UNet2DConditionModel"]
    _import_structure["unet_3d_condition"] = ["UNet3DConditionModel"]
    _import_structure["unet_kandinsky3"] = ["Kandinsky3UNet"]
    _import_structure["unet_motion_model"] = ["MotionAdapter", "UNetMotionModel"]
    _import_structure["unet_spatio_temporal_condition"] = ["UNetSpatioTemporalConditionModel"]
    _import_structure["uvit_2d"] = ["UVit2DModel"]
    _import_structure["vq_model"] = ["VQModel"]

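# Flax model classes are only registered when JAX/Flax is installed.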
if is_flax_available():
    _import_structure["controlnet_flax"] = ["FlaxControlNetModel"]
    _import_structure["unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"]
    _import_structure["vae_flax"] = ["FlaxAutoencoderKL"]

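# Static type checkers, and runs with the DIFFUSERS_SLOW_IMPORT flag set, import
# everything eagerly so names resolve; otherwise the else branch below installs the
# lazy module instead.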
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    if is_torch_available():
        from .adapter import MultiAdapter, T2IAdapter
        from .autoencoders import (
            AsymmetricAutoencoderKL,
            AutoencoderKL,
            AutoencoderKLTemporalDecoder,
            AutoencoderTiny,
            ConsistencyDecoderVAE,
        )
        from .controlnet import ControlNetModel
        from .controlnetxs import ControlNetXSModel
        from .dual_transformer_2d import DualTransformer2DModel
        from .embeddings import ImageProjection
        from .modeling_utils import ModelMixin
        from .prior_transformer import PriorTransformer
        from .t5_film_transformer import T5FilmDecoder
        from .transformer_2d import Transformer2DModel
        from .transformer_temporal import TransformerTemporalModel
        from .unet_1d import UNet1DModel
        from .unet_2d import UNet2DModel
        from .unet_2d_condition import UNet2DConditionModel
        from .unet_3d_condition import UNet3DConditionModel
        from .unet_kandinsky3 import Kandinsky3UNet
        from .unet_motion_model import MotionAdapter, UNetMotionModel
        from .unet_spatio_temporal_condition import UNetSpatioTemporalConditionModel
        from .uvit_2d import UVit2DModel
        from .vq_model import VQModel

    if is_flax_available():
        from .controlnet_flax import FlaxControlNetModel
        from .unet_2d_condition_flax import FlaxUNet2DConditionModel
        from .vae_flax import FlaxAutoencoderKL

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
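
The `else` branch above replaces the package's module object with a `_LazyModule`, so the heavy model submodules are only imported when one of the names registered in `_import_structure` is first accessed. A minimal sketch of how that behaves from user code, assuming this file is the `diffusers.models` package `__init__` and that `diffusers` with PyTorch is installed (not part of the file itself):

import diffusers.models as models

# The package object is the _LazyModule instance installed above.
print(type(models).__name__)  # _LazyModule

# Accessing a registered name triggers the real import of its submodule.
unet_cls = models.UNet2DConditionModel
print(unet_cls.__module__)  # diffusers.models.unet_2d_condition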