From c4bcf72084f4400a98b666e5bcd1a0103ab8bb34 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Fri, 3 Oct 2025 16:56:31 +0530 Subject: [PATCH] up --- tests/lora/test_lora_layers_auraflow.py | 24 +++++---------- tests/lora/test_lora_layers_cogvideox.py | 33 +++++++-------------- tests/lora/test_lora_layers_cogview4.py | 30 ++++++------------- tests/lora/test_lora_layers_flux.py | 24 +++++---------- tests/lora/test_lora_layers_hunyuanvideo.py | 27 ++++++----------- tests/lora/test_lora_layers_ltx_video.py | 24 +++++---------- tests/lora/test_lora_layers_lumina2.py | 24 +++++---------- tests/lora/test_lora_layers_mochi.py | 27 ++++++----------- tests/lora/test_lora_layers_qwenimage.py | 24 +++++---------- tests/lora/test_lora_layers_sana.py | 24 +++++---------- tests/lora/test_lora_layers_sd3.py | 12 +++----- tests/lora/test_lora_layers_wan.py | 24 +++++---------- tests/lora/test_lora_layers_wanvace.py | 27 +++++------------ tests/lora/utils.py | 6 +--- 14 files changed, 106 insertions(+), 224 deletions(-) diff --git a/tests/lora/test_lora_layers_auraflow.py b/tests/lora/test_lora_layers_auraflow.py index 650301fa45..e3bbbfb632 100644 --- a/tests/lora/test_lora_layers_auraflow.py +++ b/tests/lora/test_lora_layers_auraflow.py @@ -99,42 +99,34 @@ class TestAuraFlowLoRA(PeftLoraLoaderMixinTests): return noise, input_ids, pipeline_inputs - pytest.mark.skip("Not supported in AuraFlow.") - + @pytest.mark.skip("Not supported in AuraFlow.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in AuraFlow.") - + @pytest.mark.skip("Not supported in AuraFlow.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in AuraFlow.") - + @pytest.mark.skip("Not supported in AuraFlow.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.") - + @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.") 
def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.") - + @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.") - + @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.") def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.") - + @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.") - + @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.") def test_simple_inference_with_text_lora_save_load(self): pass diff --git a/tests/lora/test_lora_layers_cogvideox.py b/tests/lora/test_lora_layers_cogvideox.py index 27dc81f763..d2557a5337 100644 --- a/tests/lora/test_lora_layers_cogvideox.py +++ b/tests/lora/test_lora_layers_cogvideox.py @@ -129,11 +129,7 @@ class TestCogVideoXLoRA(PeftLoraLoaderMixinTests): @pytest.mark.parametrize( "offload_type, use_stream", - [ - ("block_level", True), - ("leaf_level", False), - ("leaf_level", True), - ], + [("block_level", True), ("leaf_level", False), ("leaf_level", True)], ) @require_torch_accelerator def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname): @@ -141,47 +137,38 @@ class TestCogVideoXLoRA(PeftLoraLoaderMixinTests): # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338 super()._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname) - pytest.mark.skip("Not supported in CogVideoX.") - + @pytest.mark.skip("Not supported in CogVideoX.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in CogVideoX.") - 
+ @pytest.mark.skip("Not supported in CogVideoX.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in CogVideoX.") - + @pytest.mark.skip("Not supported in CogVideoX.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.") - + @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.") def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.") - + @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.") - + @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.") def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.") - + @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.") - + @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.") def test_simple_inference_with_text_lora_save_load(self): pass - pytest.mark.skip("Not supported in CogVideoX.") - + @pytest.mark.skip("Not supported in CogVideoX.") def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): pass diff --git a/tests/lora/test_lora_layers_cogview4.py b/tests/lora/test_lora_layers_cogview4.py index fa614c56cd..363da9f265 100644 --- a/tests/lora/test_lora_layers_cogview4.py +++ b/tests/lora/test_lora_layers_cogview4.py @@ -142,11 +142,7 @@ class TestCogView4LoRA(PeftLoraLoaderMixinTests): @pytest.mark.parametrize( "offload_type, use_stream", - [ - ("block_level", True), - ("leaf_level", False), - ("leaf_level", True), - ], + [("block_level", True), ("leaf_level", False), ("leaf_level", True)], ) 
@require_torch_accelerator def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname): @@ -154,42 +150,34 @@ class TestCogView4LoRA(PeftLoraLoaderMixinTests): # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338 super()._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname) - pytest.mark.skip("Not supported in CogView4.") - + @pytest.mark.skip("Not supported in CogView4.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in CogView4.") - + @pytest.mark.skip("Not supported in CogView4.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in CogView4.") - + @pytest.mark.skip("Not supported in CogView4.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in CogView4.") - + @pytest.mark.skip("Text encoder LoRA is not supported in CogView4.") def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in CogView4.") - + @pytest.mark.skip("Text encoder LoRA is not supported in CogView4.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in CogView4.") - + @pytest.mark.skip("Text encoder LoRA is not supported in CogView4.") def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in CogView4.") - + @pytest.mark.skip("Text encoder LoRA is not supported in CogView4.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in CogView4.") - + @pytest.mark.skip("Text encoder LoRA is not supported in CogView4.") def test_simple_inference_with_text_lora_save_load(self): pass diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py 
index 8db06a801c..556ec00391 100644 --- a/tests/lora/test_lora_layers_flux.py +++ b/tests/lora/test_lora_layers_flux.py @@ -230,23 +230,19 @@ class TestFluxLoRA(PeftLoraLoaderMixinTests): "LoRA should lead to different results." ) - pytest.mark.skip("Not supported in Flux.") - + @pytest.mark.skip("Not supported in Flux.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in Flux.") - + @pytest.mark.skip("Not supported in Flux.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in Flux.") - + @pytest.mark.skip("Not supported in Flux.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Not supported in Flux.") - + @pytest.mark.skip("Not supported in Flux.") def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): pass @@ -725,23 +721,19 @@ class TestFluxControlLoRA(PeftLoraLoaderMixinTests): assert pipe.transformer.x_embedder.weight.data.shape[1] == in_features * 2 assert pipe.transformer.config.in_channels == in_features * 2 - pytest.mark.skip("Not supported in Flux.") - + @pytest.mark.skip("Not supported in Flux.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in Flux.") - + @pytest.mark.skip("Not supported in Flux.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in Flux.") - + @pytest.mark.skip("Not supported in Flux.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Not supported in Flux.") - + @pytest.mark.skip("Not supported in Flux.") def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): pass diff --git a/tests/lora/test_lora_layers_hunyuanvideo.py b/tests/lora/test_lora_layers_hunyuanvideo.py index 3439fef15c..52ee3cd9f7 100644 --- a/tests/lora/test_lora_layers_hunyuanvideo.py +++ b/tests/lora/test_lora_layers_hunyuanvideo.py @@ 
-155,48 +155,39 @@ class TestHunyuanVideoLoRA(PeftLoraLoaderMixinTests): super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) # TODO(aryan): Fix the following test - pytest.mark.skip("This test fails with an error I haven't been able to debug yet.") - + @pytest.mark.skip("This test fails with an error I haven't been able to debug yet.") def test_simple_inference_save_pretrained(self): pass - pytest.mark.skip("Not supported in HunyuanVideo.") - + @pytest.mark.skip("Not supported in HunyuanVideo.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in HunyuanVideo.") - + @pytest.mark.skip("Not supported in HunyuanVideo.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in HunyuanVideo.") - + @pytest.mark.skip("Not supported in HunyuanVideo.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.") - + @pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.") def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.") - + @pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.") - + @pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.") def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.") - + @pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.") - + @pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.") def test_simple_inference_with_text_lora_save_load(self): 
pass diff --git a/tests/lora/test_lora_layers_ltx_video.py b/tests/lora/test_lora_layers_ltx_video.py index db5ade6f67..37bad941bf 100644 --- a/tests/lora/test_lora_layers_ltx_video.py +++ b/tests/lora/test_lora_layers_ltx_video.py @@ -114,42 +114,34 @@ class TestLTXVideoLoRA(PeftLoraLoaderMixinTests): def test_simple_inference_with_text_denoiser_lora_unfused(self): super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) - pytest.mark.skip("Not supported in LTXVideo.") - + @pytest.mark.skip("Not supported in LTXVideo.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in LTXVideo.") - + @pytest.mark.skip("Not supported in LTXVideo.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in LTXVideo.") - + @pytest.mark.skip("Not supported in LTXVideo.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.") - + @pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.") - + @pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.") - + @pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.") - + @pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.") - + @pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora_save_load(self): pass diff --git 
a/tests/lora/test_lora_layers_lumina2.py b/tests/lora/test_lora_layers_lumina2.py index 6ce70d53a0..c0ee9c34e4 100644 --- a/tests/lora/test_lora_layers_lumina2.py +++ b/tests/lora/test_lora_layers_lumina2.py @@ -100,43 +100,35 @@ class TestLumina2LoRA(PeftLoraLoaderMixinTests): return noise, input_ids, pipeline_inputs - pytest.mark.skip("Not supported in Lumina2.") - + @pytest.mark.skip("Not supported in Lumina2.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in Lumina2.") - + @pytest.mark.skip("Not supported in Lumina2.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in Lumina2.") - + @pytest.mark.skip("Not supported in Lumina2.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.") def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.") def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.") def test_simple_inference_with_text_lora_save_load(self): pass diff --git a/tests/lora/test_lora_layers_mochi.py b/tests/lora/test_lora_layers_mochi.py index eddf59a696..f9da672732 100644 --- a/tests/lora/test_lora_layers_mochi.py +++ 
b/tests/lora/test_lora_layers_mochi.py @@ -105,47 +105,38 @@ class TestMochiLoRA(PeftLoraLoaderMixinTests): def test_simple_inference_with_text_denoiser_lora_unfused(self): super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) - pytest.mark.skip("Not supported in Mochi.") - + @pytest.mark.skip("Not supported in Mochi.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in Mochi.") - + @pytest.mark.skip("Not supported in Mochi.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in Mochi.") - + @pytest.mark.skip("Not supported in Mochi.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Mochi.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Mochi.") def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Mochi.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Mochi.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Mochi.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Mochi.") def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Mochi.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Mochi.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Mochi.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Mochi.") def test_simple_inference_with_text_lora_save_load(self): pass - pytest.mark.skip("Not supported in CogVideoX.") - + @pytest.mark.skip("Not supported in Mochi.") def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): pass diff --git a/tests/lora/test_lora_layers_qwenimage.py 
b/tests/lora/test_lora_layers_qwenimage.py index 470c2212c2..c244646530 100644 --- a/tests/lora/test_lora_layers_qwenimage.py +++ b/tests/lora/test_lora_layers_qwenimage.py @@ -96,42 +96,34 @@ class TestQwenImageLoRA(PeftLoraLoaderMixinTests): return noise, input_ids, pipeline_inputs - pytest.mark.skip("Not supported in Qwen Image.") - + @pytest.mark.skip("Not supported in Qwen Image.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in Qwen Image.") - + @pytest.mark.skip("Not supported in Qwen Image.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in Qwen Image.") - + @pytest.mark.skip("Not supported in Qwen Image.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.") def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.") def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.") def test_simple_inference_with_text_lora_save_load(self): pass diff --git a/tests/lora/test_lora_layers_sana.py b/tests/lora/test_lora_layers_sana.py index 0f2a3cbe9e..5977aeb9a5 100644 --- a/tests/lora/test_lora_layers_sana.py 
+++ b/tests/lora/test_lora_layers_sana.py @@ -105,42 +105,34 @@ class TestSanaLoRA(PeftLoraLoaderMixinTests): return noise, input_ids, pipeline_inputs - pytest.mark.skip("Not supported in SANA.") - + @pytest.mark.skip("Not supported in SANA.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Not supported in SANA.") - + @pytest.mark.skip("Not supported in SANA.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in SANA.") - + @pytest.mark.skip("Not supported in SANA.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in SANA.") - + @pytest.mark.skip("Text encoder LoRA is not supported in SANA.") def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in SANA.") - + @pytest.mark.skip("Text encoder LoRA is not supported in SANA.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in SANA.") - + @pytest.mark.skip("Text encoder LoRA is not supported in SANA.") def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in SANA.") - + @pytest.mark.skip("Text encoder LoRA is not supported in SANA.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in SANA.") - + @pytest.mark.skip("Text encoder LoRA is not supported in SANA.") def test_simple_inference_with_text_lora_save_load(self): pass diff --git a/tests/lora/test_lora_layers_sd3.py b/tests/lora/test_lora_layers_sd3.py index d60d098411..a44f6887f4 100644 --- a/tests/lora/test_lora_layers_sd3.py +++ b/tests/lora/test_lora_layers_sd3.py @@ -113,23 +113,19 @@ class TestSD3LoRA(PeftLoraLoaderMixinTests): lora_filename = "lora_peft_format.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) - 
pytest.mark.skip("Not supported in SD3.") - + @pytest.mark.skip("Not supported in SD3.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in SD3.") - + @pytest.mark.skip("Not supported in SD3.") def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): pass - pytest.mark.skip("Not supported in SD3.") - + @pytest.mark.skip("Not supported in SD3.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in SD3.") - + @pytest.mark.skip("Not supported in SD3.") def test_modify_padding_mode(self): pass diff --git a/tests/lora/test_lora_layers_wan.py b/tests/lora/test_lora_layers_wan.py index 18c671aa2f..3393521471 100644 --- a/tests/lora/test_lora_layers_wan.py +++ b/tests/lora/test_lora_layers_wan.py @@ -110,42 +110,34 @@ class TestWanLoRA(PeftLoraLoaderMixinTests): def test_simple_inference_with_text_denoiser_lora_unfused(self): super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) - pytest.mark.skip("Not supported in Wan.") - + @pytest.mark.skip("Not supported in Wan.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in Wan.") - + @pytest.mark.skip("Not supported in Wan.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in Wan.") - + @pytest.mark.skip("Not supported in Wan.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Wan.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Wan.") def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Wan.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Wan.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Wan.") - + 
@pytest.mark.skip("Text encoder LoRA is not supported in Wan.") def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Wan.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Wan.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Wan.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Wan.") def test_simple_inference_with_text_lora_save_load(self): pass diff --git a/tests/lora/test_lora_layers_wanvace.py b/tests/lora/test_lora_layers_wanvace.py index 1c94930688..95a1638d21 100644 --- a/tests/lora/test_lora_layers_wanvace.py +++ b/tests/lora/test_lora_layers_wanvace.py @@ -126,49 +126,38 @@ class TestWanVACELoRA(PeftLoraLoaderMixinTests): def test_simple_inference_with_text_denoiser_lora_unfused(self): super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) - pytest.mark.skip("Not supported in Wan VACE.") - + @pytest.mark.skip("Not supported in Wan VACE.") def test_simple_inference_with_text_denoiser_block_scale(self): pass - pytest.mark.skip("Not supported in Wan VACE.") - + @pytest.mark.skip("Not supported in Wan VACE.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass - pytest.mark.skip("Not supported in Wan VACE.") - + @pytest.mark.skip("Not supported in Wan VACE.") def test_modify_padding_mode(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_partial_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.") 
def test_simple_inference_with_text_lora_and_scale(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora_fused(self): pass - pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.") - + @pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora_save_load(self): pass - def test_layerwise_casting_inference_denoiser(self): - super().test_layerwise_casting_inference_denoiser() - @require_peft_version_greater("0.13.2") def test_lora_exclude_modules_wanvace(self, base_pipe_output, tmpdirname): exclude_module_name = "vace_blocks.0.proj_out" diff --git a/tests/lora/utils.py b/tests/lora/utils.py index 7d33415d73..8a91a97689 100644 --- a/tests/lora/utils.py +++ b/tests/lora/utils.py @@ -2029,11 +2029,7 @@ class PeftLoraLoaderMixinTests: @pytest.mark.parametrize( "offload_type, use_stream", - [ - ("block_level", True), - ("leaf_level", False), - ("leaf_level", True), - ], + [("block_level", True), ("leaf_level", False), ("leaf_level", True)], ) @require_torch_accelerator def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname):