Mirror of https://github.com/huggingface/diffusers.git
Move slow tests to nightly (#5093)
* move slow tests to nightly
* move slow tests to nightly
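The diff below swaps the @slow marker for @nightly on the affected pipeline integration test classes and updates the matching imports from diffusers.utils.testing_utils. As a rough sketch of the pattern these markers typically follow in Hugging Face test suites (the helper name parse_flag_from_env and the exact flag parsing are assumptions here, not taken from this commit), both decorators reduce to unittest.skipUnless gated by an environment variable:

import os
import unittest


def parse_flag_from_env(key, default=False):
    # Hypothetical helper: read an environment variable and treat "yes"/"true"/"1" as True.
    value = os.environ.get(key)
    if value is None:
        return default
    return value.strip().lower() in ("yes", "true", "1")


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)


def slow(test_case):
    # Skipped unless RUN_SLOW is set, i.e. the test only runs in the slow CI job.
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def nightly(test_case):
    # Skipped unless RUN_NIGHTLY is set, i.e. the test only runs in the scheduled nightly job.
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)

Under that reading, moving a class from @slow to @nightly only changes which flag (and therefore which CI schedule) collects it; the test bodies are untouched, which is why every hunk below is a one-line decorator or import change.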
@@ -45,7 +45,7 @@ from diffusers import (
     PNDMScheduler,
 )
 from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device

 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -492,7 +492,7 @@ class AudioLDM2PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values()))


-@slow
+@nightly
 class AudioLDM2PipelineSlowTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
@@ -29,8 +29,8 @@ from diffusers.utils.testing_utils import (
     floats_tensor,
     load_image,
     load_numpy,
+    nightly,
     require_torch_gpu,
-    slow,
     torch_device,
 )
@@ -291,7 +291,7 @@ class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3


-@slow
+@nightly
 @require_torch_gpu
 class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
@@ -32,8 +32,8 @@ from diffusers.utils.testing_utils import (
     floats_tensor,
     load_image,
     load_numpy,
+    nightly,
     require_torch_gpu,
-    slow,
     torch_device,
 )
@@ -219,7 +219,7 @@ class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.Test
         ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


-@slow
+@nightly
 @require_torch_gpu
 class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
@@ -26,7 +26,6 @@ from diffusers.utils.testing_utils import (
     load_numpy,
     nightly,
     require_torch_gpu,
-    slow,
     torch_device,
 )
@@ -136,7 +135,7 @@ class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


-@slow
+@nightly
 @require_torch_gpu
 class LDMTextToImagePipelineSlowTests(unittest.TestCase):
     def tearDown(self):
@@ -25,8 +25,8 @@ from diffusers.utils.testing_utils import (
     enable_full_determinism,
     floats_tensor,
     load_image,
+    nightly,
     require_torch,
-    slow,
     torch_device,
 )
@@ -114,7 +114,7 @@ class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
         assert image.shape == (1, 64, 64, 3)


-@slow
+@nightly
 @require_torch
 class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase):
     def test_inference_superresolution(self):
@@ -21,7 +21,7 @@ from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokeni

 from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
 from diffusers.pipelines.shap_e import ShapERenderer
-from diffusers.utils.testing_utils import load_numpy, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device

 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
@@ -230,7 +230,7 @@ class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert images.shape[0] == batch_size * num_images_per_prompt


-@slow
+@nightly
 @require_torch_gpu
 class ShapEPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
@@ -22,7 +22,14 @@ from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

 from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
 from diffusers.pipelines.shap_e import ShapERenderer
-from diffusers.utils.testing_utils import floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import (
+    floats_tensor,
+    load_image,
+    load_numpy,
+    nightly,
+    require_torch_gpu,
+    torch_device,
+)

 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
@@ -243,7 +250,7 @@ class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert images.shape[0] == batch_size * num_images_per_prompt


-@slow
+@nightly
 @require_torch_gpu
 class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
@@ -27,9 +27,9 @@ from diffusers.utils.testing_utils import (
     floats_tensor,
     load_image,
     load_numpy,
+    nightly,
     require_torch_gpu,
     skip_mps,
-    slow,
     torch_device,
 )
@@ -194,7 +194,7 @@ class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterM
         return super().test_attention_slicing_forward_pass()


-@slow
+@nightly
 @require_torch_gpu
 class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
@@ -28,7 +28,7 @@ from diffusers import (
     StableDiffusionLDM3DPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -203,7 +203,7 @@ class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
         assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


-@slow
+@nightly
 @require_torch_gpu
 class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
     def tearDown(self):
@@ -18,7 +18,7 @@ import unittest

 from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
 from diffusers.utils import is_flax_available
-from diffusers.utils.testing_utils import nightly, require_flax, slow
+from diffusers.utils.testing_utils import nightly, require_flax


 if is_flax_available():
@@ -28,7 +28,7 @@ if is_flax_available():
     from flax.training.common_utils import shard


-@slow
+@nightly
 @require_flax
 class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
@@ -28,7 +28,6 @@ from diffusers.utils.testing_utils import (
     nightly,
     require_torch_gpu,
     skip_mps,
-    slow,
     torch_device,
 )
@@ -451,7 +450,7 @@ class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase):
         assert np.abs(expected_image - image).max() < 1e-1


-@slow
+@nightly
 @require_torch_gpu
 class UnCLIPPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
@@ -41,9 +41,9 @@ from diffusers.utils.testing_utils import (
     floats_tensor,
     load_image,
     load_numpy,
+    nightly,
     require_torch_gpu,
     skip_mps,
-    slow,
     torch_device,
 )
@@ -492,7 +492,7 @@ class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCa
         return super().test_save_load_optional_components()


-@slow
+@nightly
 @require_torch_gpu
 class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
@@ -20,7 +20,7 @@ from diffusers import (
     UniDiffuserPipeline,
     UniDiffuserTextDecoder,
 )
-from diffusers.utils.testing_utils import floats_tensor, load_image, nightly, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import floats_tensor, load_image, nightly, require_torch_gpu, torch_device
 from diffusers.utils.torch_utils import randn_tensor

 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
@@ -517,7 +517,7 @@ class UniDiffuserPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert text[0][: len(expected_text_prefix)] == expected_text_prefix


-@slow
+@nightly
 @require_torch_gpu
 class UniDiffuserPipelineSlowTests(unittest.TestCase):
     def tearDown(self):
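To make the effect concrete, here is a minimal, self-contained test module in the same style as the classes above; the class and test names are invented for illustration and do not appear in this commit:

import gc
import unittest

import torch

from diffusers.utils.testing_utils import nightly, torch_device


@nightly  # previously such classes carried @slow; now they are only collected when RUN_NIGHTLY is set
class ExamplePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Mirror the tearDown hooks above: release memory between integration test cases.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_runs_on_configured_device(self):
        sample = torch.ones(2, 2, device=torch_device)
        self.assertEqual(sample.sum().item(), 4.0)

Running RUN_NIGHTLY=yes python -m pytest on such a module executes the class, while a plain pytest run reports it as skipped, which is the behaviour the nightly schedule relies on after this change.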