
Move more slow tests to nightly (#5220)

* move to nightly

* fix mistake
Author: Dhruv Nair
Date: 2023-09-28 19:00:41 +05:30
Committed by: GitHub
Parent: 622f35b1d0
Commit: c78ee143e9
14 changed files with 27 additions and 28 deletions
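
The change is mechanical across all 14 files: each touched test module drops slow from its diffusers.utils.testing_utils import and switches the integration-test classes (or methods) from the @slow decorator to @nightly, so they run only in the nightly CI job. For context, the sketch below shows the usual pattern behind such environment-variable-gated markers; it illustrates the mechanism only and is not the verbatim diffusers implementation, and the flag names RUN_SLOW / RUN_NIGHTLY are assumed from the Hugging Face test-suite convention.

# Minimal sketch of env-var-gated test markers (illustrative, not the exact
# diffusers code). Each decorator skips a test unless its flag is enabled.
import os
import unittest


def _flag_from_env(name: str) -> bool:
    # Treat "1", "true", "yes" (case-insensitive) as enabled.
    return os.environ.get(name, "").lower() in {"1", "true", "yes"}


def slow(test_case):
    # Runs only when RUN_SLOW is set, i.e. in the scheduled slow-test CI job.
    return unittest.skipUnless(_flag_from_env("RUN_SLOW"), "test is slow")(test_case)


def nightly(test_case):
    # Runs only when RUN_NIGHTLY is set, i.e. in the nightly CI job.
    return unittest.skipUnless(_flag_from_env("RUN_NIGHTLY"), "test is nightly")(test_case)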


@@ -29,7 +29,7 @@ from diffusers import (
UNet2DConditionModel,
UNet2DModel,
)
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
enable_full_determinism()
@@ -95,7 +95,7 @@ class PipelineFastTests(unittest.TestCase):
)
return vqvae, unet
-@slow
+@nightly
def test_audio_diffusion(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
mel = Mel(


@@ -37,7 +37,7 @@ from diffusers import (
UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
@@ -369,7 +369,7 @@ class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
-@slow
+@nightly
class AudioLDMPipelineSlowTests(unittest.TestCase):
def tearDown(self):
super().tearDown()


@@ -33,8 +33,8 @@ from diffusers.utils.testing_utils import (
floats_tensor,
load_image,
load_numpy,
+nightly,
require_torch_gpu,
-slow,
torch_device,
)
@@ -232,7 +232,7 @@ class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unitte
super().test_float16_inference(expected_max_diff=2e-1)
-@slow
+@nightly
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):


@@ -20,7 +20,7 @@ import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device
enable_full_determinism()
@@ -96,7 +96,7 @@ class LDMPipelineFastTests(unittest.TestCase):
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
-@slow
+@nightly
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
def test_inference_uncond(self):


@@ -20,13 +20,13 @@ import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
enable_full_determinism()
-@slow
+@nightly
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):


@@ -28,7 +28,7 @@ from diffusers import (
StableDiffusionModelEditingPipeline,
UNet2DConditionModel,
)
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
@@ -184,7 +184,7 @@ class StableDiffusionModelEditingPipelineFastTests(
super().test_attention_slicing_forward_pass(expected_max_diff=5e-3)
-@slow
+@nightly
@require_torch_gpu
class StableDiffusionModelEditingSlowTests(unittest.TestCase):
def tearDown(self):


@@ -29,8 +29,8 @@ from diffusers import (
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
+nightly,
require_torch_gpu,
-slow,
torch_device,
)
@@ -188,7 +188,7 @@ class StableDiffusionParadigmsPipelineFastTests(PipelineLatentTesterMixin, Pipel
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
-@slow
+@nightly
@require_torch_gpu
class StableDiffusionParadigmsPipelineSlowTests(unittest.TestCase):
def tearDown(self):


@@ -26,7 +26,7 @@ from diffusers import (
StableDiffusionSAGPipeline,
UNet2DConditionModel,
)
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
@@ -115,7 +115,7 @@ class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTes
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
-@slow
+@nightly
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):


@@ -28,10 +28,10 @@ from diffusers import (
)
from diffusers.utils.testing_utils import (
load_numpy,
+nightly,
numpy_cosine_similarity_distance,
require_torch_gpu,
skip_mps,
-slow,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -187,7 +187,7 @@ class StableDiffusionAttendAndExcitePipelineFastTests(
@require_torch_gpu
-@slow
+@nightly
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
# Attend and excite requires being able to run a backward pass at
# inference time. There's no deterministic backward operator for pad


@@ -39,7 +39,6 @@ from diffusers.utils.testing_utils import (
nightly,
numpy_cosine_similarity_distance,
require_torch_gpu,
-slow,
torch_device,
)
@@ -292,7 +291,7 @@ class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, Pipeli
@require_torch_gpu
-@slow
+@nightly
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
super().tearDown()


@@ -13,7 +13,7 @@ from diffusers import (
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
-from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
@@ -185,7 +185,7 @@ class StableUnCLIPPipelineFastTests(
self._test_inference_batch_single_identical(expected_max_diff=1e-3)
-@slow
+@nightly
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):


@@ -22,9 +22,9 @@ from diffusers.utils.testing_utils import (
floats_tensor,
load_image,
load_numpy,
+nightly,
require_torch_gpu,
skip_mps,
-slow,
torch_device,
)
@@ -206,7 +206,7 @@ class StableUnCLIPImg2ImgPipelineFastTests(
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
-@slow
+@nightly
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):


@@ -18,12 +18,12 @@ import unittest
import torch
from diffusers import DDIMScheduler, TextToVideoZeroPipeline
-from diffusers.utils.testing_utils import load_pt, require_torch_gpu, slow
+from diffusers.utils.testing_utils import load_pt, nightly, require_torch_gpu
from ..test_pipelines_common import assert_mean_pixel_difference
-@slow
+@nightly
@require_torch_gpu
class TextToVideoZeroPipelineSlowTests(unittest.TestCase):
def test_full_model(self):


@@ -31,9 +31,9 @@ from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
is_flaky,
+nightly,
numpy_cosine_similarity_distance,
skip_mps,
-slow,
torch_device,
)
@@ -195,7 +195,7 @@ class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
return super().test_progress_bar()
-@slow
+@nightly
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
def test_two_step_model(self):
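
After this commit, the tests above are skipped by default and only collected when the nightly flag is set. A hedged example of exercising one of them locally, assuming the RUN_NIGHTLY flag described earlier; the flag name and the test file path are assumptions for illustration, not part of this diff:

# Illustrative local run of a test that is now gated behind @nightly.
# RUN_NIGHTLY and the test path below are assumed, not taken from this commit.
# The flag is usually exported in the shell before invoking pytest; setting it
# here works as long as the test modules have not been imported yet.
import os
import pytest

os.environ["RUN_NIGHTLY"] = "1"  # enable tests decorated with @nightly
pytest.main(["-q", "tests/pipelines/audio_diffusion/test_audio_diffusion.py"])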