diff --git a/src/diffusers/training_utils.py b/src/diffusers/training_utils.py
index 1a3abb49a0..df9c7e8826 100644
--- a/src/diffusers/training_utils.py
+++ b/src/diffusers/training_utils.py
@@ -1,7 +1,6 @@
 import contextlib
 import copy
-import os
 import random
 from typing import Any, Dict, Iterable, Optional, Union
 
 import numpy as np
@@ -14,26 +13,6 @@ if is_transformers_available():
     import transformers
 
 
-def enable_full_determinism(seed: int):
-    """
-    Helper function for reproducible behavior during distributed training. See
-    - https://pytorch.org/docs/stable/notes/randomness.html for pytorch
-    """
-    # set seed first
-    set_seed(seed)
-
-    # Enable PyTorch deterministic mode. This potentially requires either the environment
-    # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set,
-    # depending on the CUDA version, so we set them both here
-    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
-    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
-    torch.use_deterministic_algorithms(True)
-
-    # Enable CUDNN deterministic mode
-    torch.backends.cudnn.deterministic = True
-    torch.backends.cudnn.benchmark = False
-
-
 def set_seed(seed: int):
     """
     Args:
diff --git a/src/diffusers/utils/testing_utils.py b/src/diffusers/utils/testing_utils.py
index 4ad7d97b44..93d0ef5b7b 100644
--- a/src/diffusers/utils/testing_utils.py
+++ b/src/diffusers/utils/testing_utils.py
@@ -514,3 +514,21 @@ class CaptureLogger:
 
     def __repr__(self):
         return f"captured: {self.out}\n"
+
+
+def enable_full_determinism():
+    """
+    Helper function for reproducible behavior during distributed training. See
+    - https://pytorch.org/docs/stable/notes/randomness.html for pytorch
+    """
+    # Enable PyTorch deterministic mode. This potentially requires either the environment
+    # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set,
+    # depending on the CUDA version, so we set them both here
+    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
+    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
+    torch.use_deterministic_algorithms(True)
+
+    # Enable CUDNN deterministic mode
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = False
+    torch.backends.cuda.matmul.allow_tf32 = False
diff --git a/tests/models/test_layers_utils.py b/tests/models/test_layers_utils.py
index 98fa1afcbb..b438b2ddb4 100644
--- a/tests/models/test_layers_utils.py
+++ b/tests/models/test_layers_utils.py
@@ -27,9 +27,6 @@ from diffusers.models.transformer_2d import Transformer2DModel
 from diffusers.utils import torch_device
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-
-
 class EmbeddingsTests(unittest.TestCase):
     def test_timestep_embeddings(self):
         embedding_dim = 256
diff --git a/tests/models/test_models_unet_1d.py b/tests/models/test_models_unet_1d.py
index 78f759cb1a..9fb1a61011 100644
--- a/tests/models/test_models_unet_1d.py
+++ b/tests/models/test_models_unet_1d.py
@@ -23,9 +23,6 @@ from diffusers.utils import floats_tensor, slow, torch_device
 
 from .test_modeling_common import ModelTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-
-
 class UNet1DModelTests(ModelTesterMixin, unittest.TestCase):
     model_class = UNet1DModel
diff --git a/tests/models/test_models_unet_2d.py b/tests/models/test_models_unet_2d.py
index 8f9a6b813f..92a5664daa 100644
--- a/tests/models/test_models_unet_2d.py
+++ b/tests/models/test_models_unet_2d.py
@@ -21,13 +21,14 @@ import torch
 
 from diffusers import UNet2DModel
 from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism
 
 from .test_modeling_common import ModelTesterMixin
 
 
 logger = logging.get_logger(__name__)
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+
+enable_full_determinism()
 
 
 class Unet2DModelTests(ModelTesterMixin, unittest.TestCase):
diff --git a/tests/models/test_models_unet_2d_condition.py b/tests/models/test_models_unet_2d_condition.py
index d3ca5ea304..43a487a32b 100644
--- a/tests/models/test_models_unet_2d_condition.py
+++ b/tests/models/test_models_unet_2d_condition.py
@@ -33,13 +33,14 @@ from diffusers.utils import (
     torch_device,
 )
 from diffusers.utils.import_utils import is_xformers_available
+from diffusers.utils.testing_utils import enable_full_determinism
 
 from .test_modeling_common import ModelTesterMixin
 
 
 logger = logging.get_logger(__name__)
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+
+enable_full_determinism()
 
 
 def create_lora_layers(model, mock_weights: bool = True):
diff --git a/tests/models/test_models_unet_3d_condition.py b/tests/models/test_models_unet_3d_condition.py
index 08863adfea..928f6bcbe9 100644
--- a/tests/models/test_models_unet_3d_condition.py
+++ b/tests/models/test_models_unet_3d_condition.py
@@ -29,13 +29,14 @@ from diffusers.utils import (
     torch_device,
 )
 from diffusers.utils.import_utils import is_xformers_available
+from diffusers.utils.testing_utils import enable_full_determinism
 
 from .test_modeling_common import ModelTesterMixin
 
 
+enable_full_determinism()
+
 logger = logging.get_logger(__name__)
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
 
 
 def create_lora_layers(model, mock_weights: bool = True):
diff --git a/tests/models/test_models_vae.py b/tests/models/test_models_vae.py
index 9a3e49cdfb..fe27e138f5 100644
--- a/tests/models/test_models_vae.py
+++ b/tests/models/test_models_vae.py
@@ -22,12 +22,12 @@ from parameterized import parameterized
 
 from diffusers import AutoencoderKL
 from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
 from diffusers.utils.import_utils import is_xformers_available
+from diffusers.utils.testing_utils import enable_full_determinism
 
 from .test_modeling_common import ModelTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class AutoencoderKLTests(ModelTesterMixin, unittest.TestCase):
diff --git a/tests/models/test_models_vq.py b/tests/models/test_models_vq.py
index f0be6f6a6d..8ea6ef77ce 100644
--- a/tests/models/test_models_vq.py
+++ b/tests/models/test_models_vq.py
@@ -19,12 +19,12 @@ import torch
 
 from diffusers import VQModel
 from diffusers.utils import floats_tensor, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism
 
 from .test_modeling_common import ModelTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class VQModelTests(ModelTesterMixin, unittest.TestCase):
diff --git a/tests/others/test_ema.py b/tests/others/test_ema.py
index 5526aadc47..32f7ae8a9a 100644
--- a/tests/others/test_ema.py
+++ b/tests/others/test_ema.py
@@ -20,11 +20,10 @@ import torch
 
 from diffusers import UNet2DConditionModel
 from diffusers.training_utils import EMAModel
-from diffusers.utils.testing_utils import skip_mps, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class EMAModelTests(unittest.TestCase):
diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion.py b/tests/pipelines/altdiffusion/test_alt_diffusion.py
index 9237f7435b..6842d29dc6 100644
--- a/tests/pipelines/altdiffusion/test_alt_diffusion.py
+++ b/tests/pipelines/altdiffusion/test_alt_diffusion.py
@@ -26,14 +26,13 @@ from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
     RobertaSeriesModelWithTransformation,
 )
 from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class AltDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py
index 35a4e91284..61457e6ca0 100644
--- a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py
+++ b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py
@@ -33,11 +33,10 @@ from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
     RobertaSeriesModelWithTransformation,
 )
 from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/audio_diffusion/test_audio_diffusion.py b/tests/pipelines/audio_diffusion/test_audio_diffusion.py
index a848bd0317..8c20f011cb 100644
--- a/tests/pipelines/audio_diffusion/test_audio_diffusion.py
+++ b/tests/pipelines/audio_diffusion/test_audio_diffusion.py
@@ -30,11 +30,10 @@ from diffusers import (
     UNet2DModel,
 )
 from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class PipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/audioldm/test_audioldm.py b/tests/pipelines/audioldm/test_audioldm.py
index 566b2c2d2c..0825fc36a2 100644
--- a/tests/pipelines/audioldm/test_audioldm.py
+++ b/tests/pipelines/audioldm/test_audioldm.py
@@ -37,13 +37,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism
 
 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/controlnet/test_controlnet.py b/tests/pipelines/controlnet/test_controlnet.py
index 0453bb38e1..97b5e20f3c 100644
--- a/tests/pipelines/controlnet/test_controlnet.py
+++ b/tests/pipelines/controlnet/test_controlnet.py
@@ -32,7 +32,7 @@ from diffusers import (
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
 from diffusers.utils import load_image, load_numpy, randn_tensor, slow, torch_device
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
@@ -41,8 +41,7 @@ from ..pipeline_params import (
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class ControlNetPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/controlnet/test_controlnet_img2img.py b/tests/pipelines/controlnet/test_controlnet_img2img.py
index b83a8af277..9d3b10aa82 100644
--- a/tests/pipelines/controlnet/test_controlnet_img2img.py
+++ b/tests/pipelines/controlnet/test_controlnet_img2img.py
@@ -35,7 +35,7 @@ from diffusers import (
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
 from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
@@ -44,8 +44,7 @@ from ..pipeline_params import (
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class ControlNetImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/controlnet/test_controlnet_inpaint.py b/tests/pipelines/controlnet/test_controlnet_inpaint.py
index 786b0e608e..155286630c 100644
--- a/tests/pipelines/controlnet/test_controlnet_inpaint.py
+++ b/tests/pipelines/controlnet/test_controlnet_inpaint.py
@@ -35,7 +35,7 @@ from diffusers import (
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
 from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
@@ -44,8 +44,7 @@ from ..pipeline_params import (
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class ControlNetInpaintPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/tests/pipelines/dance_diffusion/test_dance_diffusion.py
index 361839043c..0ba86daa61 100644
--- a/tests/pipelines/dance_diffusion/test_dance_diffusion.py
+++ b/tests/pipelines/dance_diffusion/test_dance_diffusion.py
@@ -21,13 +21,13 @@ import torch
 
 from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
 from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
 
 from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/ddim/test_ddim.py b/tests/pipelines/ddim/test_ddim.py
index e997ae45d9..0861d7daab 100644
--- a/tests/pipelines/ddim/test_ddim.py
+++ b/tests/pipelines/ddim/test_ddim.py
@@ -19,13 +19,13 @@ import numpy as np
 import torch
 
 from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
-from diffusers.utils.testing_utils import require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
 
 from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/ddpm/test_ddpm.py b/tests/pipelines/ddpm/test_ddpm.py
index 5e3e47cb74..a3c2902151 100644
--- a/tests/pipelines/ddpm/test_ddpm.py
+++ b/tests/pipelines/ddpm/test_ddpm.py
@@ -19,10 +19,10 @@ import numpy as np
 import torch
 
 from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel
-from diffusers.utils.testing_utils import require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class DDPMPipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/dit/test_dit.py b/tests/pipelines/dit/test_dit.py
index d8098178f3..4937915696 100644
--- a/tests/pipelines/dit/test_dit.py
+++ b/tests/pipelines/dit/test_dit.py
@@ -21,7 +21,7 @@ import torch
 
 from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
 from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import (
     CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
@@ -30,7 +30,7 @@ from ..pipeline_params import (
 from ..test_pipelines_common import PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/karras_ve/test_karras_ve.py b/tests/pipelines/karras_ve/test_karras_ve.py
index 391e61a2b9..142058bcd7 100644
--- a/tests/pipelines/karras_ve/test_karras_ve.py
+++ b/tests/pipelines/karras_ve/test_karras_ve.py
@@ -19,10 +19,10 @@ import numpy as np
 import torch
 
 from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
-from diffusers.utils.testing_utils import require_torch, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class KarrasVePipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion.py b/tests/pipelines/latent_diffusion/test_latent_diffusion.py
index 05ff4162e5..88dc8ef9b1 100644
--- a/tests/pipelines/latent_diffusion/test_latent_diffusion.py
+++ b/tests/pipelines/latent_diffusion/test_latent_diffusion.py
@@ -21,13 +21,20 @@ import torch
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
-from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    load_numpy,
+    nightly,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
index f1aa2f08ef..d21ead543a 100644
--- a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
+++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
@@ -21,10 +21,10 @@ import torch
 
 from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel
 from diffusers.utils import PIL_INTERPOLATION, floats_tensor, load_image, slow, torch_device
-from diffusers.utils.testing_utils import require_torch
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
index aa7b33730d..ff8670ea29 100644
--- a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
+++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
@@ -20,10 +20,10 @@ import torch
 from transformers import CLIPTextConfig, CLIPTextModel
 
 from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
-from diffusers.utils.testing_utils import require_torch, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class LDMPipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/paint_by_example/test_paint_by_example.py b/tests/pipelines/paint_by_example/test_paint_by_example.py
index 80ba3f5ed3..14c1664488 100644
--- a/tests/pipelines/paint_by_example/test_paint_by_example.py
+++ b/tests/pipelines/paint_by_example/test_paint_by_example.py
@@ -25,14 +25,13 @@ from transformers import CLIPImageProcessor, CLIPVisionConfig
 
 from diffusers import AutoencoderKL, PaintByExamplePipeline, PNDMScheduler, UNet2DConditionModel
 from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
 from diffusers.utils import floats_tensor, load_image, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class PaintByExamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/pndm/test_pndm.py b/tests/pipelines/pndm/test_pndm.py
index bed5fea561..c259571393 100644
--- a/tests/pipelines/pndm/test_pndm.py
+++ b/tests/pipelines/pndm/test_pndm.py
@@ -19,10 +19,10 @@ import numpy as np
 import torch
 
 from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
-from diffusers.utils.testing_utils import require_torch, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class PNDMPipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/repaint/test_repaint.py b/tests/pipelines/repaint/test_repaint.py
index 59968eaf10..e372cf979e 100644
--- a/tests/pipelines/repaint/test_repaint.py
+++ b/tests/pipelines/repaint/test_repaint.py
@@ -20,14 +20,21 @@ import numpy as np
 import torch
 
 from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel
-from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    load_image,
+    load_numpy,
+    nightly,
+    require_torch_gpu,
+    skip_mps,
+    torch_device,
+)
 
 from ..pipeline_params import IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_INPAINTING_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class RepaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/score_sde_ve/test_score_sde_ve.py b/tests/pipelines/score_sde_ve/test_score_sde_ve.py
index 036ecc3f6b..32505253f6 100644
--- a/tests/pipelines/score_sde_ve/test_score_sde_ve.py
+++ b/tests/pipelines/score_sde_ve/test_score_sde_ve.py
@@ -19,10 +19,10 @@ import numpy as np
 import torch
 
 from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
-from diffusers.utils.testing_utils import require_torch, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class ScoreSdeVeipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py b/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
index ba42b1fe9c..9e810616dc 100644
--- a/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
+++ b/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
@@ -25,10 +25,10 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
 from diffusers.pipelines.semantic_stable_diffusion import SemanticStableDiffusionPipeline as StableDiffusionPipeline
 from diffusers.utils import floats_tensor, nightly, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class SafeDiffusionPipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py b/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py
index 3ec6f681be..cc8690eb87 100644
--- a/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py
+++ b/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py
@@ -22,13 +22,13 @@ import torch
 
 from diffusers import DDPMScheduler, MidiProcessor, SpectrogramDiffusionPipeline
 from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
 from diffusers.utils import require_torch_gpu, skip_mps, slow, torch_device
-from diffusers.utils.testing_utils import require_note_seq, require_onnxruntime
+from diffusers.utils.testing_utils import enable_full_determinism, require_note_seq, require_onnxruntime
 
 from ..pipeline_params import TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS, TOKENS_TO_AUDIO_GENERATION_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 MIDI_FILE = "./tests/fixtures/elise_format0.mid"
diff --git a/tests/pipelines/stable_diffusion/test_cycle_diffusion.py b/tests/pipelines/stable_diffusion/test_cycle_diffusion.py
index 3d6bfff1bb..a1ae3d2d0e 100644
--- a/tests/pipelines/stable_diffusion/test_cycle_diffusion.py
+++ b/tests/pipelines/stable_diffusion/test_cycle_diffusion.py
@@ -23,14 +23,13 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
 from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py
index 1f52a09b67..aec4436710 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py
@@ -39,15 +39,14 @@ from diffusers import (
 )
 from diffusers.models.attention_processor import AttnProcessor
 from diffusers.utils import load_numpy, nightly, slow, torch_device
-from diffusers.utils.testing_utils import CaptureLogger, require_torch_gpu
+from diffusers.utils.testing_utils import CaptureLogger, enable_full_determinism, require_torch_gpu
 
 from ...models.test_models_unet_2d_condition import create_lora_layers
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py
index 0ce55ae78a..c35d84de98 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py
@@ -30,14 +30,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusionImageVariationPipelineFastTests(
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
index 4afc16d9b6..8ab252b9be 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
@@ -34,7 +34,7 @@ from diffusers import (
 )
 from diffusers.image_processor import VaeImageProcessor
 from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
 
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
@@ -44,8 +44,7 @@ from ..pipeline_params import (
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusionImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
index 5c2d9d7c44..44de277ead 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
@@ -33,15 +33,14 @@ from diffusers import (
 )
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image
 from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ...models.test_models_unet_2d_condition import create_lora_layers
 from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusionInpaintPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py
index 8647041fbb..fa00a0d201 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py
@@ -34,11 +34,10 @@ from diffusers import (
     VQModel,
 )
 from diffusers.utils import floats_tensor, load_image, nightly, slow, torch_device
-from diffusers.utils.testing_utils import load_numpy, preprocess_image, require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, preprocess_image, require_torch_gpu
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusionInpaintLegacyPipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
index 99a0694938..fbff6c5549 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
@@ -32,14 +32,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import floats_tensor, load_image, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusionInstructPix2PixPipelineFastTests(
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py
index 546b1d2125..4eccb871a0 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py
@@ -21,10 +21,10 @@ import torch
 
 from diffusers import StableDiffusionKDiffusionPipeline
 from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 @slow
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py
index b448dbef1e..cba20417bc 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py
@@ -29,14 +29,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 @skip_mps
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py
index 61708b36bf..02a15b2a29 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py
@@ -30,14 +30,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 @skip_mps
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py
index 90cc856464..98f5910ab3 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py
@@ -33,14 +33,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import load_image, load_pt, require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, load_image, load_pt, require_torch_gpu, skip_mps
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 @skip_mps
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py
index 7cb8ab409a..2b0f0bfc11 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py
@@ -27,14 +27,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
index bc4ab7d664..3f9867783b 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
@@ -33,14 +33,13 @@ from diffusers import (
     logging,
 )
 from diffusers.utils import load_numpy, nightly, slow, torch_device
-from diffusers.utils.testing_utils import CaptureLogger, require_torch_gpu
+from diffusers.utils.testing_utils import CaptureLogger, enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusion2PipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
index ae1eefa682..08ac298689 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
@@ -49,14 +49,13 @@ from diffusers.utils import (
     slow,
     torch_device,
 )
-from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 @skip_mps
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
index c9da7b0689..8df5b6da84 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
@@ -33,14 +33,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import load_image, slow
-from diffusers.utils.testing_utils import floats_tensor, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
index 77242add93..10d8561f01 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
@@ -24,14 +24,13 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
 from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu, slow
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusion2InpaintPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
index 539b4b1cc3..561536a44e 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
@@ -29,13 +29,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class StableDiffusionLatentUpscalePipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
index 747809a4fb..7100e5023a 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
@@ -24,10 +24,10 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
 from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
+enable_full_determinism()
 
 
 class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
index a874cbb7e0..d1a2c85665 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
@@ -30,11 +30,10 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase):
diff --git a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
index c614fa4805..09e31aacfb 100644
--- a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
+++ b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
@@ -28,9 +28,6 @@ from diffusers.utils import floats_tensor, nightly, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-
-
 class SafeDiffusionPipelineFastTests(unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
diff --git a/tests/pipelines/stable_unclip/test_stable_unclip.py b/tests/pipelines/stable_unclip/test_stable_unclip.py
index 78775a938b..8b4a065cd4 100644
--- a/tests/pipelines/stable_unclip/test_stable_unclip.py
+++ b/tests/pipelines/stable_unclip/test_stable_unclip.py
@@ -13,14 +13,13 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
-from diffusers.utils.testing_utils import load_numpy, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableUnCLIPPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py b/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
index dcd4300b85..35cae61242 100644
--- a/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
+++ b/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
@@ -18,6 +18,7 @@ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
 from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
+    enable_full_determinism,
     floats_tensor,
     load_image,
     load_numpy,
@@ -35,8 +36,7 @@ from ..test_pipelines_common import (
 )
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class StableUnCLIPImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/test_pipelines.py b/tests/pipelines/test_pipelines.py
index df1a3b6ac7..a9abb0b4fb 100644
--- a/tests/pipelines/test_pipelines.py
+++ b/tests/pipelines/test_pipelines.py
@@ -65,6 +65,7 @@ from diffusers.utils import (
 )
 from diffusers.utils.testing_utils import (
     CaptureLogger,
+    enable_full_determinism,
     get_tests_dir,
     load_numpy,
     require_compel,
@@ -73,8 +74,7 @@ from diffusers.utils.testing_utils import (
 )
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class DownloadTests(unittest.TestCase):
@@ -700,7 +700,6 @@ class CustomPipelineTests(unittest.TestCase):
     def test_download_from_git(self):
         # Because adaptive_avg_pool2d_backward_cuda
         # does not have a deterministic implementation.
-        torch.use_deterministic_algorithms(False)
         clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
 
         feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
@@ -722,7 +721,6 @@ class CustomPipelineTests(unittest.TestCase):
 
         image = pipeline("a prompt", num_inference_steps=2, output_type="np").images[0]
         assert image.shape == (512, 512, 3)
-        torch.use_deterministic_algorithms(True)
 
     def test_save_pipeline_change_config(self):
         pipe = DiffusionPipeline.from_pretrained(
diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index f23e850f4d..3984ed76ed 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -18,9 +18,6 @@ from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version
 from diffusers.utils.testing_utils import require_torch, torch_device
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-
-
 def to_np(tensor):
     if isinstance(tensor, torch.Tensor):
         tensor = tensor.detach().cpu().numpy()
diff --git a/tests/pipelines/text_to_video/test_text_to_video.py b/tests/pipelines/text_to_video/test_text_to_video.py
index 212becbb67..8b4bae2275 100644
--- a/tests/pipelines/text_to_video/test_text_to_video.py
+++ b/tests/pipelines/text_to_video/test_text_to_video.py
@@ -27,13 +27,13 @@ from diffusers import (
     UNet3DConditionModel,
 )
 from diffusers.utils import load_numpy, skip_mps, slow
+from diffusers.utils.testing_utils import enable_full_determinism
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 @skip_mps
diff --git a/tests/pipelines/unclip/test_unclip.py b/tests/pipelines/unclip/test_unclip.py
index 5357e5b0e7..393c3ba163 100644
--- a/tests/pipelines/unclip/test_unclip.py
+++ b/tests/pipelines/unclip/test_unclip.py
@@ -23,14 +23,13 @@ from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
 
 from diffusers import PriorTransformer, UnCLIPPipeline, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel
 from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
 from diffusers.utils import load_numpy, nightly, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/pipelines/unclip/test_unclip_image_variation.py b/tests/pipelines/unclip/test_unclip_image_variation.py
index ded162102d..75a2625080 100644
--- a/tests/pipelines/unclip/test_unclip_image_variation.py
+++ b/tests/pipelines/unclip/test_unclip_image_variation.py
@@ -37,14 +37,13 @@ from diffusers import (
 )
 from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
 from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import load_image, require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
 
 from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
 
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.use_deterministic_algorithms(True)
+enable_full_determinism()
 
 
 class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
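For reference, a minimal sketch of how a test module is expected to use the relocated helper after this change. The module and class names below are illustrative only and not part of the diff; the key point is that `enable_full_determinism()` (now in `diffusers.utils.testing_utils`) no longer takes a seed, so seeding stays a separate, explicit call to `set_seed`.

```python
# Hypothetical test module illustrating the new pattern (not part of this diff).
import unittest

import torch

from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import enable_full_determinism


# Module-level call, mirroring the test files touched above: it forces deterministic
# kernels (cuDNN, matmul TF32 off, deterministic algorithms) but does not seed RNGs.
enable_full_determinism()


class ExampleDeterminismTests(unittest.TestCase):
    def test_reproducible_randn(self):
        set_seed(0)  # seeding is now a separate, explicit step
        first = torch.randn(4)
        set_seed(0)
        second = torch.randn(4)
        assert torch.equal(first, second)
```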