mirror of https://github.com/huggingface/diffusers.git synced 2026-01-29 07:22:12 +03:00
This commit is contained in:
DN6
2025-08-25 13:23:40 +05:30
parent 12b4edc2fe
commit 3b2e85d853
5 changed files with 62 additions and 14 deletions

View File

@@ -12,19 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Callable, Dict, Optional, Tuple, Union

 import torch
 import torch.nn as nn
 import torch.nn.functional as F

-from ..utils import deprecate, logging
+from ..utils import logging
 from ..utils.import_utils import is_torch_npu_available, is_torch_xla_available, is_xformers_available
 from ..utils.torch_utils import maybe_allow_in_graph
-from .activations import GEGLU, GELU, ApproximateGELU, FP32SiLU, LinearActivation, SwiGLU
-from .attention_processor import Attention, AttentionProcessor, JointAttnProcessor2_0
-from .embeddings import SinusoidalPositionalEmbedding
-from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm, SD35AdaLayerNormZeroX
+from .attention_processor import AttentionProcessor


 if is_xformers_available():
@@ -511,7 +507,12 @@ class AttentionModuleMixin:
 def _chunked_feed_forward(*args, **kwargs):
     """Backward compatibility stub. Use transformers.modeling_common._chunked_feed_forward instead."""
+    logger.warning(
+        "Importing `_chunked_feed_forward` from `diffusers.models.attention` is deprecated and will be removed in a future version. "
+        "Please use `from diffusers.models.transformers.modeling_common import _chunked_feed_forward` instead."
+    )
     from .transformers.modeling_common import _chunked_feed_forward as _actual_chunked_feed_forward

     return _actual_chunked_feed_forward(*args, **kwargs)
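Note: this stub (and the ones below) defers the import of the real implementation into the function body, so the old module stays importable without eagerly loading, or cycling with, the new module. A minimal self-contained sketch of the same idea, with a hypothetical stand-in for the relocated function (not the real chunked feed-forward):

import logging

logger = logging.getLogger(__name__)


def _actual_chunked_feed_forward(hidden_states):
    # Hypothetical stand-in for the relocated implementation; in this diff it
    # lives in diffusers/models/transformers/modeling_common.py.
    return hidden_states


def _chunked_feed_forward(*args, **kwargs):
    """Backward compatibility stub: warn on every call, then delegate."""
    logger.warning(
        "Importing `_chunked_feed_forward` from the old module is deprecated. "
        "Import it from its new location instead."
    )
    # The real stub performs the import here, inside the function, so that
    # importing the old module does not trigger a circular import.
    return _actual_chunked_feed_forward(*args, **kwargs)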
@@ -519,8 +520,14 @@ class GatedSelfAttentionDense:
r"""
Backward compatibility stub. Use transformers.modeling_common.GatedSelfAttentionDense instead.
"""
def __new__(cls, *args, **kwargs):
logger.warning(
"Importing `GatedSelfAttentionDense` from `diffusers.models.attention` is deprecated and will be removed in a future version. "
"Please use `from diffusers.models.transformers.modeling_common import GatedSelfAttentionDense` instead."
)
from .transformers.modeling_common import GatedSelfAttentionDense
return GatedSelfAttentionDense(*args, **kwargs)
@@ -528,8 +535,14 @@ class JointTransformerBlock:
r"""
Backward compatibility stub. Use transformers.modeling_common.JointTransformerBlock instead.
"""
def __new__(cls, *args, **kwargs):
logger.warning(
"Importing `JointTransformerBlock` from `diffusers.models.attention` is deprecated and will be removed in a future version. "
"Please use `from diffusers.models.transformers.modeling_common import JointTransformerBlock` instead."
)
from .transformers.modeling_common import JointTransformerBlock
return JointTransformerBlock(*args, **kwargs)
@@ -537,8 +550,14 @@ class BasicTransformerBlock:
r"""
Backward compatibility stub. Use transformers.modeling_common.BasicTransformerBlock instead.
"""
def __new__(cls, *args, **kwargs):
logger.warning(
"Importing `BasicTransformerBlock` from `diffusers.models.attention` is deprecated and will be removed in a future version. "
"Please use `from diffusers.models.transformers.modeling_common import BasicTransformerBlock` instead."
)
from .transformers.modeling_common import BasicTransformerBlock
return BasicTransformerBlock(*args, **kwargs)
@@ -546,8 +565,14 @@ class LuminaFeedForward:
r"""
Backward compatibility stub. Use transformers.modeling_common.LuminaFeedForward instead.
"""
def __new__(cls, *args, **kwargs):
logger.warning(
"Importing `LuminaFeedForward` from `diffusers.models.attention` is deprecated and will be removed in a future version. "
"Please use `from diffusers.models.transformers.modeling_common import LuminaFeedForward` instead."
)
from .transformers.modeling_common import LuminaFeedForward
return LuminaFeedForward(*args, **kwargs)
@@ -555,8 +580,14 @@ class TemporalBasicTransformerBlock:
r"""
Backward compatibility stub. Use transformers.modeling_common.TemporalBasicTransformerBlock instead.
"""
def __new__(cls, *args, **kwargs):
logger.warning(
"Importing `TemporalBasicTransformerBlock` from `diffusers.models.attention` is deprecated and will be removed in a future version. "
"Please use `from diffusers.models.transformers.modeling_common import TemporalBasicTransformerBlock` instead."
)
from .transformers.modeling_common import TemporalBasicTransformerBlock
return TemporalBasicTransformerBlock(*args, **kwargs)
@@ -564,8 +595,14 @@ class SkipFFTransformerBlock:
r"""
Backward compatibility stub. Use transformers.modeling_common.SkipFFTransformerBlock instead.
"""
def __new__(cls, *args, **kwargs):
logger.warning(
"Importing `SkipFFTransformerBlock` from `diffusers.models.attention` is deprecated and will be removed in a future version. "
"Please use `from diffusers.models.transformers.modeling_common import SkipFFTransformerBlock` instead."
)
from .transformers.modeling_common import SkipFFTransformerBlock
return SkipFFTransformerBlock(*args, **kwargs)
@@ -573,16 +610,27 @@ class FreeNoiseTransformerBlock:
r"""
Backward compatibility stub. Use transformers.modeling_common.FreeNoiseTransformerBlock instead.
"""
def __new__(cls, *args, **kwargs):
from .transformers.modeling_common import FreeNoiseTransformerBlock
return FreeNoiseTransformerBlock(*args, **kwargs)
def __new__(cls, *args, **kwargs):
logger.warning(
"Importing `FreeNoiseTransformerBlock` from `diffusers.models.attention` is deprecated and will be removed in a future version. "
"Please use `from diffusers.models.transformers.modeling_common import FreeNoiseTransformerBlock` instead."
)
from .transformers.modeling_common import FreeNoiseTransformerBlock
return FreeNoiseTransformerBlock(*args, **kwargs)
class FeedForward:
r"""
Backward compatibility stub. Use transformers.modeling_common.FeedForward instead.
"""
def __new__(cls, *args, **kwargs):
logger.warning(
"Importing `FeedForward` from `diffusers.models.attention` is deprecated and will be removed in a future version. "
"Please use `from diffusers.models.transformers.modeling_common import FeedForward` instead."
)
from .transformers.modeling_common import FeedForward
return FeedForward(*args, **kwargs)
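All of the class stubs above share one trick: `__new__` returns an instance of the relocated class rather than of the stub, so Python skips the stub's `__init__` and callers transparently receive the new object. A runnable sketch with a hypothetical stand-in class (not the real diffusers block):

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)


class _RelocatedFeedForward:
    # Hypothetical stand-in for transformers.modeling_common.FeedForward.
    def __init__(self, dim: int):
        self.dim = dim


class FeedForward:
    r"""Backward compatibility stub, mirroring the pattern in this diff."""

    def __new__(cls, *args, **kwargs):
        logger.warning("`FeedForward` has moved; import it from its new module.")
        # Returning an object that is not an instance of `cls` from __new__
        # means this stub's __init__ is never called; the caller simply gets
        # the relocated object back.
        return _RelocatedFeedForward(*args, **kwargs)


ff = FeedForward(128)
assert type(ff) is _RelocatedFeedForward  # the old constructor path still works

One caveat of this pattern: the stubs only shim construction, so `isinstance` checks against the old names stop matching, and subclassing a stub will not behave like subclassing the real block.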

View File

@@ -19,10 +19,10 @@ from torch import nn
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
-from .modeling_common import BasicTransformerBlock
 from ..embeddings import PatchEmbed
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
+from .modeling_common import BasicTransformerBlock


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

View File

@@ -1255,4 +1255,4 @@ class FeedForward(nn.Module):
deprecate("scale", "1.0.0", deprecation_message)
for module in self.net:
hidden_states = module(hidden_states)
return hidden_states
return hidden_states

View File

@@ -19,11 +19,11 @@ from torch import nn
 from ...configuration_utils import LegacyConfigMixin, register_to_config
 from ...utils import deprecate, logging
-from .modeling_common import BasicTransformerBlock
 from ..embeddings import ImagePositionalEmbeddings, PatchEmbed, PixArtAlphaTextProjection
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import LegacyModelMixin
 from ..normalization import AdaLayerNormSingle
+from .modeling_common import BasicTransformerBlock


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

View File

@@ -20,7 +20,6 @@ from ...configuration_utils import ConfigMixin, register_to_config
 from ...loaders import FromOriginalModelMixin, PeftAdapterMixin, SD3Transformer2DLoadersMixin
 from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
 from ...utils.torch_utils import maybe_allow_in_graph
-from .modeling_common import FeedForward, JointTransformerBlock
 from ..attention_processor import (
     Attention,
     AttentionProcessor,
@@ -31,6 +30,7 @@ from ..embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
 from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero
+from .modeling_common import FeedForward, JointTransformerBlock


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
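
Taken together, the five files make `transformers/modeling_common.py` the canonical home of these blocks while keeping the old import path alive behind warnings. For downstream code the migration is a one-line import change (paths taken from the warning strings in this diff, reflecting this branch's layout):

# Deprecated: resolves through the stub in diffusers.models.attention and
# logs a warning when the block is constructed.
# from diffusers.models.attention import BasicTransformerBlock

# Preferred after this change:
from diffusers.models.transformers.modeling_common import BasicTransformerBlock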