@@ -131,13 +131,13 @@ class TestCogVideoXLoRA(PeftLoraLoaderMixinTests):
 
     @pytest.mark.parametrize(
         "offload_type, use_stream",
-        [("block_level", True), ("leaf_level", False), ("leaf_level", True)],
+        [("block_level", True), ("leaf_level", False)],
     )
     @require_torch_accelerator
-    def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname):
+    def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname, pipe):
         # TODO: We don't run the (leaf_level, True) test here that is enabled for other models.
         # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
-        super()._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname)
+        super()._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname, pipe)
 
     @pytest.mark.skip("Not supported in CogVideoX.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
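Note on the parametrization above: `offload_type` and `use_stream` correspond to diffusers' group-offloading feature, which shuttles model weights between an onload device and an offload device during inference. A minimal sketch of the underlying API on a toy module, assuming a CUDA device is available (the exact signature may vary across diffusers versions):

```python
import torch
from diffusers.hooks import apply_group_offloading


class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.blocks = torch.nn.ModuleList([torch.nn.Linear(16, 16) for _ in range(4)])

    def forward(self, x):
        for block in self.blocks:
            x = block(x)
        return x


model = TinyModel()
apply_group_offloading(
    model,
    onload_device=torch.device("cuda"),
    offload_device=torch.device("cpu"),
    offload_type="block_level",  # "leaf_level" instead offloads at per-layer granularity
    num_blocks_per_group=2,      # only used for "block_level"
    use_stream=True,             # prefetch upcoming groups on a separate CUDA stream
)
```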
@@ -117,13 +117,13 @@ class TestCogView4LoRA(PeftLoraLoaderMixinTests):
 
     @pytest.mark.parametrize(
         "offload_type, use_stream",
-        [("block_level", True), ("leaf_level", False), ("leaf_level", True)],
+        [("block_level", True), ("leaf_level", False)],
     )
     @require_torch_accelerator
-    def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname):
+    def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname, pipe):
         # TODO: We don't run the (leaf_level, True) test here that is enabled for other models.
         # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
-        super()._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname)
+        super()._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname, pipe)
 
     @pytest.mark.skip("Not supported in CogView4.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
@@ -111,11 +111,8 @@ class TestFluxLoRA(PeftLoraLoaderMixinTests):
 
         return noise, input_ids, pipeline_inputs
 
-    def test_with_alpha_in_state_dict(self, tmpdirname):
-        components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
+    def test_with_alpha_in_state_dict(self, tmpdirname, pipe):
+        _, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
         pipe.transformer.add_adapter(denoiser_lora_config)
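Background for `test_with_alpha_in_state_dict`: in PEFT-style LoRA the low-rank update is scaled by `alpha / rank`, so an `alpha` carried in the state dict changes outputs even when the A/B matrices are identical. A toy illustration with made-up shapes, not code from the test:

```python
import torch

rank, alpha = 4, 8.0
lora_A = torch.randn(rank, 16)                # down-projection
lora_B = torch.randn(16, rank)                # up-projection
delta_W = (alpha / rank) * (lora_B @ lora_A)  # effective update added to the base weight
```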
@@ -152,11 +149,8 @@ class TestFluxLoRA(PeftLoraLoaderMixinTests):
         )
         assert not np.allclose(images_lora_with_alpha, images_lora, atol=0.001, rtol=0.001)
 
-    def test_lora_expansion_works_for_absent_keys(self, base_pipe_output, tmpdirname):
-        components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
+    def test_lora_expansion_works_for_absent_keys(self, base_pipe_output, tmpdirname, pipe):
+        _, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
         # Modify the config to have a layer which won't be present in the second LoRA we will load.
@@ -192,11 +186,8 @@ class TestFluxLoRA(PeftLoraLoaderMixinTests):
             "LoRA should lead to different results."
         )
 
-    def test_lora_expansion_works_for_extra_keys(self, base_pipe_output, tmpdirname):
-        components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
+    def test_lora_expansion_works_for_extra_keys(self, base_pipe_output, tmpdirname, pipe):
+        _, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
         modified_denoiser_lora_config = copy.deepcopy(denoiser_lora_config)
@@ -312,12 +303,7 @@ class TestFluxControlLoRA(PeftLoraLoaderMixinTests):
 
         return noise, input_ids, pipeline_inputs
 
-    def test_with_norm_in_state_dict(self):
-        components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-
+    def test_with_norm_in_state_dict(self, pipe):
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
         logger = logging.get_logger("diffusers.loaders.lora_pipeline")
@@ -346,6 +332,7 @@ class TestFluxControlLoRA(PeftLoraLoaderMixinTests):
 
         pipe.unload_lora_weights()
         lora_unload_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
 
         assert pipe.transformer._transformer_norm_layers is None
         assert np.allclose(original_output, lora_unload_output, atol=1e-05, rtol=1e-05)
         assert not np.allclose(original_output, lora_load_output, atol=1e-06, rtol=1e-06), (
@@ -358,11 +345,8 @@ class TestFluxControlLoRA(PeftLoraLoaderMixinTests):
             pipe.load_lora_weights(norm_state_dict)
         assert "Unsupported keys found in state dict when trying to load normalization layers" in cap_logger.out
 
-    def test_lora_parameter_expanded_shapes(self):
+    def test_lora_parameter_expanded_shapes(self, pipe):
-        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
         original_out = pipe(**inputs, generator=torch.manual_seed(0))[0]
@@ -573,14 +557,10 @@ class TestFluxControlLoRA(PeftLoraLoaderMixinTests):
         lora_output_4 = pipe(**inputs, generator=torch.manual_seed(0))[0]
         assert np.allclose(lora_output_3, lora_output_4, atol=0.001, rtol=0.001)
 
-    def test_load_regular_lora(self, base_pipe_output):
+    def test_load_regular_lora(self, base_pipe_output, pipe):
         # This test checks if a regular lora (think of one trained on Flux.1 Dev for example) can be loaded
         # into the transformer with more input channels than Flux.1 Dev, for example. Some examples of those
         # transformers include Flux Fill, Flux Control, etc.
-        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
         out_features, in_features = pipe.transformer.x_embedder.weight.shape
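The comment in `test_load_regular_lora` describes loading a LoRA trained against a narrower `x_embedder` (e.g. Flux.1 Dev) into a transformer with more input channels (e.g. Flux Fill or Flux Control). A toy sketch of the shape mismatch and the zero-padding idea behind such expansion (illustrative only, not the library's actual expansion code):

```python
import torch

rank, base_in, expanded_in = 4, 64, 128
lora_A = torch.randn(rank, base_in)  # trained against the narrow input projection

padded_A = torch.zeros(rank, expanded_in)
padded_A[:, :base_in] = lora_A       # the extra input channels contribute nothing
assert padded_A.shape == (rank, expanded_in)
```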
@@ -138,11 +138,8 @@ class TestLumina2LoRA(PeftLoraLoaderMixinTests):
         reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
         strict=False,
     )
-    def test_lora_fuse_nan(self):
-        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
+    def test_lora_fuse_nan(self, pipe):
+        _, text_lora_config, denoiser_lora_config = self.get_dummy_components()
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
         if "text_encoder" in self.pipeline_class._lora_loadable_modules:
@@ -31,7 +31,6 @@ from ..testing_utils import (
     require_peft_backend,
     require_peft_version_greater,
     skip_mps,
-    torch_device,
 )
 
 
@@ -159,10 +158,9 @@ class TestWanVACELoRA(PeftLoraLoaderMixinTests):
         pass
 
     @require_peft_version_greater("0.13.2")
-    def test_lora_exclude_modules_wanvace(self, base_pipe_output, tmpdirname):
+    def test_lora_exclude_modules_wanvace(self, base_pipe_output, tmpdirname, pipe):
         exclude_module_name = "vace_blocks.0.proj_out"
-        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
-        pipe = self.pipeline_class(**components).to(torch_device)
+        _, text_lora_config, denoiser_lora_config = self.get_dummy_components()
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
         assert base_pipe_output.shape == self.output_shape
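The change repeated across these hunks is mechanical: per-test pipeline construction (`get_dummy_components` → `pipeline_class(**components)` → `.to(torch_device)` → `set_progress_bar_config`) is replaced by a `pipe` argument injected as a pytest fixture. A minimal sketch of what such a fixture could look like on the shared mixin; the actual fixture in `PeftLoraLoaderMixinTests` may differ:

```python
import pytest
import torch


class PeftLoraLoaderMixinTests:
    @pytest.fixture
    def pipe(self):
        # Build and configure the pipeline once per test, replacing the
        # boilerplate removed throughout this diff.
        components, _, _ = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")  # stand-in for torch_device
        pipe.set_progress_bar_config(disable=None)
        return pipe
```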