mirror of https://github.com/huggingface/diffusers.git synced 2026-01-27 17:22:53 +03:00

move tests to nightly (#4451)

* move tests to nightly

* clean up code quality issues

* more clean up

This commit is contained in:
Dhruv Nair
2023-08-03 15:25:28 +02:00
committed by GitHub
parent 372b58108e
commit 1d2587bb34
13 changed files with 26 additions and 26 deletions
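
For context, `slow` and `nightly` are test-gating decorators imported from `diffusers.utils` / `diffusers.utils.testing_utils`; swapping one for the other moves a test from the slow-test job to the scheduled nightly job. Below is a rough sketch of how such markers are typically implemented. The helper name `_flag_from_env` and its exact behavior are illustrative assumptions rather than code from this commit; the `RUN_SLOW` / `RUN_NIGHTLY` environment variables follow the usual Hugging Face testing convention.

import os
import unittest

def _flag_from_env(name, default=False):
    # Treat "1", "true", "yes", "on" (case-insensitive) as enabled.
    return os.environ.get(name, str(default)).lower() in ("1", "true", "yes", "on")

_run_slow_tests = _flag_from_env("RUN_SLOW")
_run_nightly_tests = _flag_from_env("RUN_NIGHTLY")

def slow(test_case):
    # Skipped unless RUN_SLOW is set, keeping slow tests out of the default CI run.
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)

def nightly(test_case):
    # Skipped unless RUN_NIGHTLY is set, so these tests only run in the nightly schedule.
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)

With gating along these lines, the integration tests changed below would be picked up by a run such as RUN_NIGHTLY=1 python -m pytest tests/pipelines/ rather than by the RUN_SLOW job.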

View File

@@ -25,7 +25,7 @@ from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
-from diffusers.utils import slow, torch_device
+from diffusers.utils import nightly, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -205,7 +205,7 @@ class AltDiffusionPipelineFastTests(
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
-@slow
+@nightly
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):

View File

@@ -32,7 +32,7 @@ from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
-from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
+from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
@@ -254,7 +254,7 @@ class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
-@slow
+@nightly
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):

View File

@@ -29,7 +29,7 @@ from diffusers import (
UNet2DConditionModel,
UNet2DModel,
)
-from diffusers.utils import slow, torch_device
+from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
@@ -175,7 +175,7 @@ class PipelineFastTests(unittest.TestCase):
assert np.abs(image_slice.flatten() - expected_slice).max() == 0
-@slow
+@nightly
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
def tearDown(self):

View File

@@ -10,7 +10,7 @@ from diffusers import (
ConsistencyModelPipeline,
UNet2DModel,
)
-from diffusers.utils import randn_tensor, slow, torch_device
+from diffusers.utils import nightly, randn_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
@@ -161,7 +161,7 @@ class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
-@slow
+@nightly
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
def tearDown(self):

View File

@@ -20,7 +20,7 @@ import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
-from diffusers.utils import slow, torch_device
+from diffusers.utils import nightly, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
@@ -116,7 +116,7 @@ class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
-@slow
+@nightly
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
def tearDown(self):

View File

@@ -20,7 +20,7 @@ import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
-from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
+from diffusers.utils import is_xformers_available, load_numpy, nightly, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
@@ -106,8 +106,8 @@ class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
+@nightly
@require_torch_gpu
-@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
super().tearDown()

View File

@@ -19,7 +19,7 @@ import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device
enable_full_determinism()
@@ -64,7 +64,7 @@ class KarrasVePipelineFastTests(unittest.TestCase):
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
-@slow
+@nightly
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
def test_inference(self):

View File

@@ -24,7 +24,7 @@ from transformers import CLIPImageProcessor, CLIPVisionConfig
from diffusers import AutoencoderKL, PaintByExamplePipeline, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
-from diffusers.utils import floats_tensor, load_image, slow, torch_device
+from diffusers.utils import floats_tensor, load_image, nightly, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS
@@ -165,7 +165,7 @@ class PaintByExamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
-@slow
+@nightly
@require_torch_gpu
class PaintByExamplePipelineIntegrationTests(unittest.TestCase):
def tearDown(self):

View File

@@ -19,7 +19,7 @@ import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device
enable_full_determinism()
@@ -64,7 +64,7 @@ class PNDMPipelineFastTests(unittest.TestCase):
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
-@slow
+@nightly
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
def test_inference_cifar10(self):

View File

@@ -19,7 +19,7 @@ import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device
enable_full_determinism()
@@ -66,7 +66,7 @@ class ScoreSdeVeipelineFastTests(unittest.TestCase):
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
-@slow
+@nightly
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
def test_inference(self):

View File

@@ -21,7 +21,7 @@ import torch
from diffusers import DDPMScheduler, MidiProcessor, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
-from diffusers.utils import require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils import nightly, require_torch_gpu, skip_mps, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_note_seq, require_onnxruntime
from ..pipeline_params import TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS, TOKENS_TO_AUDIO_GENERATION_PARAMS
@@ -162,7 +162,7 @@ class SpectrogramDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCa
return super().test_progress_bar()
-@slow
+@nightly
@require_torch_gpu
@require_onnxruntime
@require_note_seq

View File

@@ -19,7 +19,7 @@ import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
-from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
@@ -29,7 +29,7 @@ class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
pass
-@slow
+@nightly
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
def test_inference_image_variations(self):

View File

@@ -22,7 +22,7 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
-from diffusers.utils import load_numpy, slow, torch_device
+from diffusers.utils import load_numpy, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
@@ -193,7 +193,7 @@ class VQDiffusionPipelineFastTests(unittest.TestCase):
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
-@slow
+@nightly
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):