diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
index 60c4112838..5a3c79422c 100644
--- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
+++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
@@ -22,7 +22,6 @@ from diffusers.utils import is_accelerate_available, is_accelerate_version, load
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
     CaptureLogger,
-    disable_full_determinism,
     enable_full_determinism,
     floats_tensor,
     numpy_cosine_similarity_distance,
@@ -34,6 +33,9 @@ from diffusers.utils.testing_utils import (
 from ..test_pipelines_common import PipelineTesterMixin
 
 
+enable_full_determinism()
+
+
 def to_np(tensor):
     if isinstance(tensor, torch.Tensor):
         tensor = tensor.detach().cpu().numpy()
@@ -465,8 +467,6 @@ class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         reason="XFormers attention is only available with CUDA and `xformers` installed",
     )
     def test_xformers_attention_forwardGenerator_pass(self):
-        disable_full_determinism()
-
         expected_max_diff = 9e-4
 
         if not self.test_xformers_attention:
@@ -496,8 +496,6 @@ class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
         self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results")
 
-        enable_full_determinism()
-
 
 @slow
 @require_torch_gpu