mirror of https://github.com/huggingface/diffusers.git
@@ -118,15 +118,15 @@ class TestCogVideoXLoRA(PeftLoraLoaderMixinTests):
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self, pipe):
+        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3, pipe=pipe)
 
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
-        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
+        super().test_simple_inference_with_text_denoiser_lora_unfused(pipe=pipe, expected_atol=9e-3)
 
-    def test_lora_scale_kwargs_match_fusion(self, base_pipe_output):
+    def test_lora_scale_kwargs_match_fusion(self, pipe, base_pipe_output):
         super().test_lora_scale_kwargs_match_fusion(
-            base_pipe_output=base_pipe_output, expected_atol=9e-3, expected_rtol=9e-3
+            pipe=pipe, base_pipe_output=base_pipe_output, expected_atol=9e-3, expected_rtol=9e-3
         )
 
     @pytest.mark.parametrize(
@@ -109,11 +109,11 @@ class TestCogView4LoRA(PeftLoraLoaderMixinTests):
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self, pipe):
+        super().test_simple_inference_with_text_lora_denoiser_fused_multi(pipe=pipe, expected_atol=9e-3)
 
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
-        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
+        super().test_simple_inference_with_text_denoiser_lora_unfused(pipe=pipe, expected_atol=9e-3)
 
     @pytest.mark.parametrize(
         "offload_type, use_stream",
@@ -149,11 +149,11 @@ class TestHunyuanVideoLoRA(PeftLoraLoaderMixinTests):
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self, pipe):
+        super().test_simple_inference_with_text_lora_denoiser_fused_multi(pipe=pipe, expected_atol=9e-3)
 
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
-        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
+        super().test_simple_inference_with_text_denoiser_lora_unfused(pipe=pipe, expected_atol=9e-3)
 
     @pytest.mark.skip("Not supported in HunyuanVideo.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
@@ -108,11 +108,11 @@ class TestLTXVideoLoRA(PeftLoraLoaderMixinTests):
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self, pipe):
+        super().test_simple_inference_with_text_lora_denoiser_fused_multi(pipe=pipe, expected_atol=9e-3)
 
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
-        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
+        super().test_simple_inference_with_text_denoiser_lora_unfused(pipe=pipe, expected_atol=9e-3)
 
     @pytest.mark.skip("Not supported in LTXVideo.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
@@ -99,11 +99,11 @@ class TestMochiLoRA(PeftLoraLoaderMixinTests):
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self, pipe):
+        super().test_simple_inference_with_text_lora_denoiser_fused_multi(pipe=pipe, expected_atol=9e-3)
 
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
-        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
+        super().test_simple_inference_with_text_denoiser_lora_unfused(pipe=pipe, expected_atol=9e-3)
 
     @pytest.mark.skip("Not supported in Mochi.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
@@ -108,7 +108,7 @@ class TestStableDiffusionXLLoRA(PeftLoraLoaderMixinTests):
     def test_multiple_wrong_adapter_name_raises_error(self):
         super().test_multiple_wrong_adapter_name_raises_error()
 
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
+    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
         if torch.cuda.is_available():
             expected_atol = 9e-2
             expected_rtol = 9e-2
@@ -117,10 +117,10 @@ class TestStableDiffusionXLLoRA(PeftLoraLoaderMixinTests):
             expected_rtol = 1e-3
 
         super().test_simple_inference_with_text_denoiser_lora_unfused(
-            expected_atol=expected_atol, expected_rtol=expected_rtol
+            pipe=pipe, expected_atol=expected_atol, expected_rtol=expected_rtol
         )
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self, pipe):
         if torch.cuda.is_available():
             expected_atol = 9e-2
             expected_rtol = 9e-2
@@ -129,10 +129,10 @@ class TestStableDiffusionXLLoRA(PeftLoraLoaderMixinTests):
             expected_rtol = 1e-3
 
         super().test_simple_inference_with_text_lora_denoiser_fused_multi(
-            expected_atol=expected_atol, expected_rtol=expected_rtol
+            pipe=pipe, expected_atol=expected_atol, expected_rtol=expected_rtol
        )
 
-    def test_lora_scale_kwargs_match_fusion(self, base_pipe_output):
+    def test_lora_scale_kwargs_match_fusion(self, base_pipe_output, pipe):
         if torch.cuda.is_available():
             expected_atol = 9e-2
             expected_rtol = 9e-2
@@ -141,7 +141,7 @@ class TestStableDiffusionXLLoRA(PeftLoraLoaderMixinTests):
             expected_rtol = 1e-3
 
         super().test_lora_scale_kwargs_match_fusion(
-            base_pipe_output=base_pipe_output, expected_atol=expected_atol, expected_rtol=expected_rtol
+            pipe=pipe, base_pipe_output=base_pipe_output, expected_atol=expected_atol, expected_rtol=expected_rtol
         )
 
@@ -104,11 +104,11 @@ class TestWanLoRA(PeftLoraLoaderMixinTests):
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self, pipe):
+        super().test_simple_inference_with_text_lora_denoiser_fused_multi(pipe=pipe, expected_atol=9e-3)
 
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
-        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
+        super().test_simple_inference_with_text_denoiser_lora_unfused(pipe=pipe, expected_atol=9e-3)
 
     @pytest.mark.skip("Not supported in Wan.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
@@ -119,11 +119,11 @@ class TestWanVACELoRA(PeftLoraLoaderMixinTests):
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self, pipe):
+        super().test_simple_inference_with_text_lora_denoiser_fused_multi(pipe=pipe, expected_atol=9e-3)
 
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
-        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
+        super().test_simple_inference_with_text_denoiser_lora_unfused(pipe=pipe, expected_atol=9e-3)
 
     @pytest.mark.skip("Not supported in Wan VACE.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
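
The pattern applied across these hunks is the same: each overridden test now accepts a pipe pytest fixture and forwards it to the PeftLoraLoaderMixinTests implementation as a keyword argument, alongside the per-pipeline tolerances. The following is a minimal sketch of that pattern, not the actual diffusers test suite; the class name TestSomePipelineLoRA, the placeholder mixin body, and the fixture contents are hypothetical.

import pytest


class PeftLoraLoaderMixinTests:
    # Placeholder standing in for the real mixin; only the keyword interface matters here.
    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe, expected_atol=1e-3, expected_rtol=1e-3):
        assert pipe is not None  # the real mixin runs inference and compares fused vs. unfused outputs


class TestSomePipelineLoRA(PeftLoraLoaderMixinTests):
    @pytest.fixture
    def pipe(self):
        # Hypothetical fixture: the real tests construct the pipeline from dummy components.
        return object()

    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
        # Forward the fixture-provided pipeline and relax the tolerance, as in the hunks above.
        super().test_simple_inference_with_text_denoiser_lora_unfused(pipe=pipe, expected_atol=9e-3)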