Mirror of https://github.com/huggingface/diffusers.git (synced 2026-01-29 07:22:12 +03:00)
Commit: update
@@ -35,13 +35,13 @@ def pytest_configure(config):


 def pytest_addoption(parser):
-    from diffusers.utils.testing_utils import pytest_addoption_shared
+    from .testing_utils import pytest_addoption_shared

     pytest_addoption_shared(parser)


 def pytest_terminal_summary(terminalreporter):
-    from diffusers.utils.testing_utils import pytest_terminal_summary_main
+    from .testing_utils import pytest_terminal_summary_main

     make_reports = terminalreporter.config.getoption("--make-reports")
     if make_reports:
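Note: the rest of the diff repeats the same mechanical change in every touched test module. Absolute imports of diffusers.utils.testing_utils are replaced by relative imports of a tests-local testing_utils module, with the number of leading dots matching how deep the test file sits under tests/. A minimal before/after sketch of the pattern (the symbol list varies per file, and the exact tests/ layout is an assumption inferred from this diff):

    # before: helpers come from the installed diffusers package
    from diffusers.utils.testing_utils import enable_full_determinism, torch_device

    # after: helpers come from the tests package itself
    # ("." in tests/conftest.py, ".." one level down, "..." two levels down)
    from ...testing_utils import enable_full_determinism, torch_device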
tests/hooks/__init__.py (new empty file, 0 lines)
@@ -24,7 +24,8 @@ from diffusers.models import ModelMixin
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline
 from diffusers.utils import get_logger
 from diffusers.utils.import_utils import compare_versions
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_peak_memory_stats,

@@ -20,7 +20,8 @@ import torch
 from diffusers.hooks import HookRegistry, ModelHook
 from diffusers.training_utils import free_memory
 from diffusers.utils.logging import get_logger
-from diffusers.utils.testing_utils import CaptureLogger, torch_device
+
+from ..testing_utils import CaptureLogger, torch_device


 logger = get_logger(__name__)  # pylint: disable=invalid-name
tests/lora/__init__.py (new empty file, 0 lines)
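The new empty __init__.py files turn tests/hooks and tests/lora into importable packages, which is what lets the sys.path.append(".") workaround plus "from utils import PeftLoraLoaderMixinTests" become the package-relative "from .utils import PeftLoraLoaderMixinTests" seen in the hunks below. A rough sketch of the layout this assumes (only the files named in this diff are certain; the test file name is a placeholder):

    tests/
        testing_utils.py        (tests-local helper module; relative import target)
        lora/
            __init__.py         (new, empty: makes tests.lora a package)
            utils.py            (defines PeftLoraLoaderMixinTests)
            test_lora_x.py      (hypothetical: from .utils import PeftLoraLoaderMixinTests)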
@@ -23,7 +23,8 @@ from diffusers import (
     AuraFlowTransformer2DModel,
     FlowMatchEulerDiscreteScheduler,
 )
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     is_peft_available,
     require_peft_backend,

@@ -35,7 +36,7 @@ if is_peft_available():

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 @require_peft_backend

@@ -26,7 +26,8 @@ from diffusers import (
     CogVideoXPipeline,
     CogVideoXTransformer3DModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     require_peft_backend,
     require_torch_accelerator,

@@ -35,7 +36,7 @@ from diffusers.utils.testing_utils import (

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 @require_peft_backend
@@ -22,7 +22,8 @@ from parameterized import parameterized
 from transformers import AutoTokenizer, GlmModel

 from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     require_peft_backend,
     require_torch_accelerator,

@@ -33,7 +34,7 @@ from diffusers.utils.testing_utils import (

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 class TokenizerWrapper:

@@ -28,7 +28,8 @@ from transformers import AutoTokenizer, CLIPTextModel, CLIPTokenizer, T5EncoderM

 from diffusers import FlowMatchEulerDiscreteScheduler, FluxControlPipeline, FluxPipeline, FluxTransformer2DModel
 from diffusers.utils import load_image, logging
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     CaptureLogger,
     backend_empty_cache,
     floats_tensor,

@@ -48,7 +49,7 @@ if is_peft_available():

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402


 @require_peft_backend
@@ -26,7 +26,8 @@ from diffusers import (
     HunyuanVideoPipeline,
     HunyuanVideoTransformer3DModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     Expectations,
     backend_empty_cache,
     floats_tensor,

@@ -42,7 +43,7 @@ from diffusers.utils.testing_utils import (

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 @require_peft_backend

@@ -24,12 +24,13 @@ from diffusers import (
     LTXPipeline,
     LTXVideoTransformer3DModel,
 )
-from diffusers.utils.testing_utils import floats_tensor, require_peft_backend
+
+from ..testing_utils import floats_tensor, require_peft_backend


 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 @require_peft_backend
@@ -26,12 +26,13 @@ from diffusers import (
     Lumina2Pipeline,
     Lumina2Transformer2DModel,
 )
-from diffusers.utils.testing_utils import floats_tensor, is_torch_version, require_peft_backend, skip_mps, torch_device
+
+from ..testing_utils import floats_tensor, is_torch_version, require_peft_backend, skip_mps, torch_device


 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402


 @require_peft_backend

@@ -19,7 +19,8 @@ import torch
 from transformers import AutoTokenizer, T5EncoderModel

 from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     require_peft_backend,
     skip_mps,

@@ -28,7 +29,7 @@ from diffusers.utils.testing_utils import (

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 @require_peft_backend
@@ -24,12 +24,13 @@ from diffusers import (
     QwenImagePipeline,
     QwenImageTransformer2DModel,
 )
-from diffusers.utils.testing_utils import floats_tensor, require_peft_backend
+
+from ..testing_utils import floats_tensor, require_peft_backend


 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 @require_peft_backend

@@ -19,12 +19,13 @@ import torch
 from transformers import Gemma2Model, GemmaTokenizer

 from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel
-from diffusers.utils.testing_utils import floats_tensor, require_peft_backend
+
+from ..testing_utils import floats_tensor, require_peft_backend


 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 @require_peft_backend
@@ -32,7 +32,8 @@ from diffusers import (
     StableDiffusionPipeline,
 )
 from diffusers.utils.import_utils import is_accelerate_available
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     Expectations,
     backend_empty_cache,
     load_image,

@@ -47,7 +48,7 @@ from diffusers.utils.testing_utils import (

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402


 if is_accelerate_available():

@@ -28,7 +28,8 @@ from diffusers import (
 )
 from diffusers.utils import load_image
 from diffusers.utils.import_utils import is_accelerate_available
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     backend_empty_cache,
     is_flaky,
     nightly,

@@ -42,7 +43,7 @@ from diffusers.utils.testing_utils import (

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 if is_accelerate_available():
@@ -35,7 +35,8 @@ from diffusers import (
 )
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_accelerate_available
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     CaptureLogger,
     backend_empty_cache,
     is_flaky,

@@ -51,7 +52,7 @@ from diffusers.utils.testing_utils import (

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set, state_dicts_almost_equal  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set, state_dicts_almost_equal  # noqa: E402


 if is_accelerate_available():

@@ -24,7 +24,8 @@ from diffusers import (
     WanPipeline,
     WanTransformer3DModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     require_peft_backend,
     skip_mps,

@@ -33,7 +34,7 @@ from diffusers.utils.testing_utils import (

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 @require_peft_backend
@@ -25,7 +25,8 @@ from transformers import AutoTokenizer, T5EncoderModel

 from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanVACEPipeline, WanVACETransformer3DModel
 from diffusers.utils.import_utils import is_peft_available
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     is_flaky,
     require_peft_backend,

@@ -40,7 +41,7 @@ if is_peft_available():

 sys.path.append(".")

-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402


 @require_peft_backend

@@ -32,7 +32,8 @@ from diffusers import (
 )
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_peft_available
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     CaptureLogger,
     check_if_dicts_are_equal,
     floats_tensor,
@@ -21,7 +21,8 @@ from parameterized import parameterized

 from diffusers import AsymmetricAutoencoderKL
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     Expectations,
     backend_empty_cache,
     enable_full_determinism,

@@ -34,7 +35,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin


@@ -15,8 +15,8 @@
 import unittest

 from diffusers import AutoencoderKLCosmos
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device

+from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
@@ -16,12 +16,12 @@
 import unittest

 from diffusers import AutoencoderDC
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin

@@ -19,12 +19,12 @@ import torch

 from diffusers import AutoencoderKLHunyuanVideo
 from diffusers.models.autoencoders.autoencoder_kl_hunyuan_video import prepare_causal_attention_mask
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
@@ -21,7 +21,8 @@ from parameterized import parameterized

 from diffusers import AutoencoderKL
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,

@@ -34,7 +35,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin


@@ -18,12 +18,12 @@ import unittest
 import torch

 from diffusers import AutoencoderKLCogVideoX
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
@@ -16,12 +16,12 @@
 import unittest

 from diffusers import AutoencoderKLTemporalDecoder
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin

@@ -18,12 +18,12 @@ import unittest
 import torch

 from diffusers import AutoencoderKLLTXVideo
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
@@ -16,8 +16,8 @@
 import unittest

 from diffusers import AutoencoderKLMagvit
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device

+from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin

@@ -16,12 +16,12 @@
 import unittest

 from diffusers import AutoencoderKLMochi
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
@@ -21,7 +21,8 @@ from datasets import load_dataset
 from parameterized import parameterized

 from diffusers import AutoencoderOobleck
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,

@@ -29,7 +30,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin


@@ -21,7 +21,8 @@ import torch
 from parameterized import parameterized

 from diffusers import AutoencoderTiny
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,

@@ -30,7 +31,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
@@ -18,8 +18,8 @@ import unittest
 import torch

 from diffusers import AutoencoderKLWan
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device

+from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin

@@ -20,7 +20,9 @@ import numpy as np
 import torch

 from diffusers import ConsistencyDecoderVAE, StableDiffusionPipeline
-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     load_image,

@@ -28,8 +30,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
-from diffusers.utils.torch_utils import randn_tensor
-
 from ..test_modeling_common import ModelTesterMixin
@@ -2,8 +2,8 @@ import unittest

 from diffusers import FlaxAutoencoderKL
 from diffusers.utils import is_flax_available
-from diffusers.utils.testing_utils import require_flax

+from ...testing_utils import require_flax
 from ..test_modeling_common_flax import FlaxModelTesterMixin

@@ -18,13 +18,13 @@ import unittest
 import torch

 from diffusers import VQModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_manual_seed,
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
@@ -7,7 +7,8 @@ import torch

 from diffusers import DiffusionPipeline
 from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor
-from diffusers.utils.testing_utils import torch_device
+
+from ..testing_utils import torch_device


 class AttnAddedKVProcessorTests(unittest.TestCase):

@@ -24,7 +24,8 @@ from diffusers.models.attention import GEGLU, AdaLayerNorm, ApproximateGELU
 from diffusers.models.embeddings import get_timestep_embedding
 from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D
 from diffusers.models.transformers.transformer_2d import Transformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     backend_manual_seed,
     require_torch_accelerator_with_fp64,
     require_torch_version_greater_equal,
@@ -59,7 +59,10 @@ from diffusers.utils import (
     logging,
 )
 from diffusers.utils.hub_utils import _add_variant
-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import get_torch_cuda_device_capability
+
+from ..others.test_utils import TOKEN, USER, is_staging_test
+from ..testing_utils import (
     CaptureLogger,
     _check_safetensors_serialization,
     backend_empty_cache,

@@ -82,9 +85,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
-from diffusers.utils.torch_utils import get_torch_cuda_device_capability
-
-from ..others.test_utils import TOKEN, USER, is_staging_test


 if is_peft_available():
@@ -1,7 +1,8 @@
 import inspect

 from diffusers.utils import is_flax_available
-from diffusers.utils.testing_utils import require_flax
+
+from ..testing_utils import require_flax


 if is_flax_available():

@@ -18,13 +18,13 @@ import unittest
 import torch

 from diffusers import DiTTransformer2DModel, Transformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     slow,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
@@ -18,13 +18,13 @@ import unittest
 import torch

 from diffusers import PixArtTransformer2DModel, Transformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     slow,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin

@@ -21,7 +21,8 @@ import torch
 from parameterized import parameterized

 from diffusers import PriorTransformer
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,

@@ -29,7 +30,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
@@ -17,11 +17,11 @@ import unittest
 import torch

 from diffusers import AllegroTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin

@@ -18,8 +18,8 @@ import unittest
 import torch

 from diffusers import AuraFlowTransformer2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device

+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
@@ -20,8 +20,8 @@ import torch
 from diffusers import BriaTransformer2DModel
 from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
 from diffusers.models.embeddings import ImageProjection
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device

+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin

@@ -20,8 +20,8 @@ import torch
 from diffusers import ChromaTransformer2DModel
 from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
 from diffusers.models.embeddings import ImageProjection
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device

+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin
@@ -18,11 +18,11 @@ import unittest
 import torch

 from diffusers import CogVideoXTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin

@@ -18,11 +18,11 @@ import unittest
 import torch

 from diffusers import CogView3PlusTransformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin

@@ -17,8 +17,8 @@ import unittest
 import torch

 from diffusers import CogView4Transformer2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device

+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
@@ -18,11 +18,11 @@ import unittest
 import torch

 from diffusers import ConsisIDTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin

@@ -17,8 +17,8 @@ import unittest
 import torch

 from diffusers import CosmosTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device

+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin

@@ -18,8 +18,8 @@ import unittest
 import torch

 from diffusers import EasyAnimateTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device

+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
@@ -20,8 +20,8 @@ import torch
 from diffusers import FluxTransformer2DModel
 from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
 from diffusers.models.embeddings import ImageProjection
-from diffusers.utils.testing_utils import enable_full_determinism, is_peft_available, torch_device

+from ...testing_utils import enable_full_determinism, is_peft_available, torch_device
 from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin

@@ -18,11 +18,11 @@ import unittest
 import torch

 from diffusers import HiDreamImageTransformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
@@ -18,11 +18,11 @@ import unittest
 import torch

 from diffusers import HunyuanDiT2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin

@@ -17,11 +17,11 @@ import unittest
 import torch

 from diffusers import HunyuanVideoTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
@@ -17,11 +17,11 @@ import unittest
 import torch

 from diffusers import HunyuanVideoFramepackTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin

@@ -18,11 +18,11 @@ import unittest
 import torch

 from diffusers import LatteTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
@@ -18,8 +18,8 @@ import unittest
 import torch

 from diffusers import LTXVideoTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device

+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin

@@ -18,11 +18,11 @@ import unittest
 import torch

 from diffusers import LuminaNextDiT2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
@@ -18,11 +18,11 @@ import unittest
 import torch

 from diffusers import Lumina2Transformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin

@@ -18,8 +18,8 @@ import unittest
 import torch

 from diffusers import MochiTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device

+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
@@ -18,8 +18,8 @@ import unittest
 import torch

 from diffusers import OmniGenTransformer2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device

+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin

@@ -19,8 +19,8 @@ import pytest
 import torch

 from diffusers import QwenImageTransformer2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device

+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
@@ -17,11 +17,11 @@ import unittest
 import torch

 from diffusers import SanaTransformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin

@@ -19,11 +19,11 @@ import torch

 from diffusers import SD3Transformer2DModel
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
@@ -17,11 +17,11 @@ import unittest
 import torch

 from diffusers import SkyReelsV2Transformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin

@@ -18,11 +18,11 @@ import unittest
 import torch

 from diffusers.models.transformers import TransformerTemporalModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
@@ -17,11 +17,11 @@ import unittest
 import torch

 from diffusers import WanTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin

@@ -19,13 +19,13 @@ import pytest
 import torch

 from diffusers import UNet1DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_manual_seed,
     floats_tensor,
     slow,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
@@ -21,7 +21,8 @@ import torch

 from diffusers import UNet2DModel
 from diffusers.utils import logging
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,

@@ -30,7 +31,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin


@@ -34,7 +34,8 @@ from diffusers.models.attention_processor import (
 from diffusers.models.embeddings import ImageProjection, IPAdapterFaceIDImageProjection, IPAdapterPlusImageProjection
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_max_memory_allocated,

@@ -51,7 +52,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import (
     LoraHotSwappingForModelTesterMixin,
     ModelTesterMixin,
@@ -5,7 +5,8 @@ from parameterized import parameterized

 from diffusers import FlaxUNet2DConditionModel
 from diffusers.utils import is_flax_available
-from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
+
+from ...testing_utils import load_hf_numpy, require_flax, slow


 if is_flax_available():

@@ -21,8 +21,8 @@ import torch
 from diffusers.models import ModelMixin, UNet3DConditionModel
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device

+from ...testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin

@@ -21,8 +21,8 @@ from torch import nn

 from diffusers import ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel
 from diffusers.utils import logging
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, is_flaky, torch_device

+from ...testing_utils import enable_full_determinism, floats_tensor, is_flaky, torch_device
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
@@ -24,12 +24,12 @@ import torch
 from diffusers import MotionAdapter, UNet2DConditionModel, UNetMotionModel
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin

@@ -21,13 +21,13 @@ import torch
 from diffusers import UNetSpatioTemporalConditionModel
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     skip_mps,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
@@ -15,8 +15,8 @@
 import unittest

 from diffusers.models.unets.unet_2d_blocks import *  # noqa F403
-from diffusers.utils.testing_utils import torch_device

+from ...testing_utils import torch_device
 from .test_unet_blocks_common import UNetBlockTesterMixin

@@ -16,14 +16,15 @@ from typing import Tuple

 import torch

-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import (
     floats_tensor,
     require_torch,
     require_torch_accelerator_with_training,
     torch_all_close,
     torch_device,
 )
-from diffusers.utils.torch_utils import randn_tensor


 @require_torch
@@ -27,11 +27,6 @@ from diffusers import (
     StableDiffusionXLModularPipeline,
 )
 from diffusers.loaders import ModularIPAdapterMixin
-from diffusers.utils.testing_utils import (
-    enable_full_determinism,
-    floats_tensor,
-    torch_device,
-)

 from ...models.unets.test_models_unet_2d_condition import (
     create_ip_adapter_state_dict,

@@ -39,6 +34,11 @@ from ...models.unets.test_models_unet_2d_condition import (
 from ..test_modular_pipelines_common import (
     ModularPipelineTesterMixin,
 )
+from ..testing_utils import (
+    enable_full_determinism,
+    floats_tensor,
+    torch_device,
+)


 enable_full_determinism()

@@ -9,7 +9,8 @@ import torch
 import diffusers
 from diffusers import ComponentsManager, ModularPipeline, ModularPipelineBlocks
 from diffusers.utils import logging
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     backend_empty_cache,
     numpy_cosine_similarity_distance,
     require_accelerator,
@@ -28,7 +28,8 @@ from diffusers import (
     logging,
 )
 from diffusers.configuration_utils import ConfigMixin, register_to_config
-from diffusers.utils.testing_utils import CaptureLogger
+
+from ..testing_utils import CaptureLogger


 class SampleObject(ConfigMixin):

@@ -20,7 +20,8 @@ import torch

 from diffusers import UNet2DConditionModel
 from diffusers.training_utils import EMAModel
-from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device
+
+from ..testing_utils import enable_full_determinism, skip_mps, torch_device


 enable_full_determinism()

@@ -7,7 +7,8 @@ import numpy as np
 import PIL.Image

 from diffusers.utils.outputs import BaseOutput
-from diffusers.utils.testing_utils import require_torch
+
+from ..testing_utils import require_torch


 @dataclass
@@ -19,7 +19,8 @@ import torch

 from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
 from diffusers.training_utils import set_seed
-from diffusers.utils.testing_utils import slow
+
+from ..testing_utils import slow


 torch.backends.cuda.matmul.allow_tf32 = False

@@ -20,7 +20,8 @@ import pytest

 from diffusers import __version__
 from diffusers.utils import deprecate
-from diffusers.utils.testing_utils import Expectations, str_to_bool
+
+from ..testing_utils import Expectations, str_to_bool


 # Used to test the hub
@@ -23,7 +23,8 @@ import torch
 from transformers import AutoTokenizer, T5Config, T5EncoderModel

 from diffusers import AllegroPipeline, AllegroTransformer3DModel, AutoencoderKLAllegro, DDIMScheduler
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     numpy_cosine_similarity_distance,

@@ -33,7 +34,6 @@ from diffusers.utils.testing_utils import (
     slow,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np

@@ -19,7 +19,8 @@ from diffusers import (
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     numpy_cosine_similarity_distance,
     require_accelerator,

@@ -27,7 +28,6 @@ from diffusers.utils.testing_utils import (
     slow,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     IPAdapterTesterMixin,
@@ -21,8 +21,8 @@ from diffusers import (
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import require_accelerator, torch_device

+from ...testing_utils import require_accelerator, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     IPAdapterTesterMixin,

@@ -14,8 +14,8 @@ from diffusers import (
     UNetMotionModel,
 )
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import require_accelerator, torch_device

+from ...testing_utils import require_accelerator, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     IPAdapterTesterMixin,

@@ -20,8 +20,8 @@ from diffusers import (
 )
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import require_accelerator, torch_device

+from ...testing_utils import require_accelerator, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     IPAdapterTesterMixin,
@@ -19,8 +19,8 @@ from diffusers import (
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import require_accelerator, torch_device

+from ...testing_utils import require_accelerator, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
 from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin

@@ -20,8 +20,8 @@ from diffusers import (
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import require_accelerator, torch_device

+from ...testing_utils import require_accelerator, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
 from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin
@@ -46,14 +46,14 @@ from diffusers import (
     PNDMScheduler,
 )
 from diffusers.utils import is_transformers_version
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     is_torch_version,
     nightly,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin

@@ -27,7 +27,11 @@ from diffusers import (
     FlowMatchEulerDiscreteScheduler,
 )
 from diffusers.pipelines.bria import BriaPipeline
-from diffusers.utils.testing_utils import (
+
+# from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist
+from tests.pipelines.test_pipelines_common import PipelineTesterMixin, to_np
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     numpy_cosine_similarity_distance,

@@ -36,9 +40,6 @@ from diffusers.utils.testing_utils import (
     torch_device,
 )

-# from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist
-from tests.pipelines.test_pipelines_common import PipelineTesterMixin, to_np
-

 enable_full_determinism()
@@ -5,8 +5,8 @@ import torch
 from transformers import AutoTokenizer, T5EncoderModel

 from diffusers import AutoencoderKL, ChromaPipeline, ChromaTransformer2DModel, FlowMatchEulerDiscreteScheduler
-from diffusers.utils.testing_utils import torch_device

+from ...testing_utils import torch_device
 from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin, check_qkv_fused_layers_exist

@@ -6,8 +6,8 @@ import torch
 from transformers import AutoTokenizer, T5EncoderModel

 from diffusers import AutoencoderKL, ChromaImg2ImgPipeline, ChromaTransformer2DModel, FlowMatchEulerDiscreteScheduler
-from diffusers.utils.testing_utils import floats_tensor, torch_device

+from ...testing_utils import floats_tensor, torch_device
 from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin, check_qkv_fused_layers_exist
@@ -21,7 +21,8 @@ import torch
 from transformers import AutoTokenizer, T5EncoderModel

 from diffusers import AutoencoderKLCogVideoX, CogVideoXPipeline, CogVideoXTransformer3DModel, DDIMScheduler
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     numpy_cosine_similarity_distance,

@@ -29,7 +30,6 @@ from diffusers.utils.testing_utils import (
     slow,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     FasterCacheTesterMixin,
Some files were not shown because too many files have changed in this diff.