From dae161ed260955a906760dc3a8d71b8b04a3cc5b Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Fri, 3 Oct 2025 17:39:55 +0530
Subject: [PATCH] up

---
 tests/lora/test_lora_layers_cogvideox.py    | 6 ++++--
 tests/lora/test_lora_layers_flux.py         | 6 +++---
 tests/lora/test_lora_layers_hunyuanvideo.py | 3 ++-
 tests/lora/test_lora_layers_sdxl.py         | 6 ++++--
 4 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/tests/lora/test_lora_layers_cogvideox.py b/tests/lora/test_lora_layers_cogvideox.py
index d2557a5337..4ba9c9516d 100644
--- a/tests/lora/test_lora_layers_cogvideox.py
+++ b/tests/lora/test_lora_layers_cogvideox.py
@@ -124,8 +124,10 @@ class TestCogVideoXLoRA(PeftLoraLoaderMixinTests):
     def test_simple_inference_with_text_denoiser_lora_unfused(self):
         super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
 
-    def test_lora_scale_kwargs_match_fusion(self):
-        super().test_lora_scale_kwargs_match_fusion(expected_atol=9e-3, expected_rtol=9e-3)
+    def test_lora_scale_kwargs_match_fusion(self, base_pipe_output):
+        super().test_lora_scale_kwargs_match_fusion(
+            base_pipe_output=base_pipe_output, expected_atol=9e-3, expected_rtol=9e-3
+        )
 
     @pytest.mark.parametrize(
         "offload_type, use_stream",
diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py
index 556ec00391..1589aa4082 100644
--- a/tests/lora/test_lora_layers_flux.py
+++ b/tests/lora/test_lora_layers_flux.py
@@ -754,7 +754,7 @@ class TestFluxLoRAIntegration:
     seed = 0
 
     @pytest.fixture(scope="function")
-    def pipeline(self, torch_device):
+    def pipeline(self):
         gc.collect()
         backend_empty_cache(torch_device)
         pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
@@ -873,10 +873,10 @@ class TestFluxControlLoRAIntegration:
     prompt = "A robot made of exotic candies and chocolates of different kinds."
 
     @pytest.fixture(scope="function")
-    def pipeline(self, torch_device):
+    def pipeline(self):
         gc.collect()
         backend_empty_cache(torch_device)
-        pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
+        pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
         try:
             yield pipe
         finally:
diff --git a/tests/lora/test_lora_layers_hunyuanvideo.py b/tests/lora/test_lora_layers_hunyuanvideo.py
index 52ee3cd9f7..8ee7db8de7 100644
--- a/tests/lora/test_lora_layers_hunyuanvideo.py
+++ b/tests/lora/test_lora_layers_hunyuanvideo.py
@@ -37,6 +37,7 @@ from ..testing_utils import (
     require_peft_backend,
     require_torch_accelerator,
     skip_mps,
+    torch_device,
 )
 
 
@@ -207,7 +208,7 @@ class TestHunyuanVideoLoRAIntegration:
     seed = 0
 
     @pytest.fixture(scope="function")
-    def pipeline(self, torch_device):
+    def pipeline(self):
         gc.collect()
         backend_empty_cache(torch_device)
 
diff --git a/tests/lora/test_lora_layers_sdxl.py b/tests/lora/test_lora_layers_sdxl.py
index 6ee81dac32..7b53464c88 100644
--- a/tests/lora/test_lora_layers_sdxl.py
+++ b/tests/lora/test_lora_layers_sdxl.py
@@ -132,7 +132,7 @@ class TestStableDiffusionXLLoRA(PeftLoraLoaderMixinTests):
             expected_atol=expected_atol, expected_rtol=expected_rtol
         )
 
-    def test_lora_scale_kwargs_match_fusion(self):
+    def test_lora_scale_kwargs_match_fusion(self, base_pipe_output):
         if torch.cuda.is_available():
             expected_atol = 9e-2
             expected_rtol = 9e-2
@@ -140,7 +140,9 @@ class TestStableDiffusionXLLoRA(PeftLoraLoaderMixinTests):
         expected_atol = 1e-3
         expected_rtol = 1e-3
 
-        super().test_lora_scale_kwargs_match_fusion(expected_atol=expected_atol, expected_rtol=expected_rtol)
+        super().test_lora_scale_kwargs_match_fusion(
+            base_pipe_output=base_pipe_output, expected_atol=expected_atol, expected_rtol=expected_rtol
+        )
 
     @slow