@@ -13,16 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import sys
-import unittest

+import pytest
 import torch
 from transformers import AutoTokenizer, UMT5EncoderModel

-from diffusers import (
-    AuraFlowPipeline,
-    AuraFlowTransformer2DModel,
-    FlowMatchEulerDiscreteScheduler,
-)
+from diffusers import AuraFlowPipeline, AuraFlowTransformer2DModel, FlowMatchEulerDiscreteScheduler

 from ..testing_utils import (
     floats_tensor,
@@ -103,34 +99,42 @@ class TestAuraFlowLoRA(PeftLoraLoaderMixinTests):

         return noise, input_ids, pipeline_inputs

-    @unittest.skip("Not supported in AuraFlow.")
+    @pytest.mark.skip("Not supported in AuraFlow.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in AuraFlow.")
+    @pytest.mark.skip("Not supported in AuraFlow.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in AuraFlow.")
+    @pytest.mark.skip("Not supported in AuraFlow.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
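The hunk above shows the skip-migration pattern repeated throughout this commit. Note that the raw page dropped the `@` from the new mark lines; a bare `pytest.mark.skip(...)` expression is a no-op, so the mark only takes effect when applied as a decorator, as in this minimal standalone sketch (illustrative only, not part of the diffusers suite):

import unittest

import pytest


class OldStyleTests(unittest.TestCase):
    @unittest.skip("Not supported in AuraFlow.")  # unittest-style skip
    def test_block_scale(self):
        pass


class TestNewStyle:  # plain class, collected by pytest
    @pytest.mark.skip("Not supported in AuraFlow.")  # pytest-style skip
    def test_block_scale(self):
        pass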
@@ -13,10 +13,9 @@
 # limitations under the License.

 import sys
-import unittest

+import pytest
 import torch
-from parameterized import parameterized
 from transformers import AutoTokenizer, T5EncoderModel

 from diffusers import (
@@ -128,45 +127,61 @@ class TestCogVideoXLoRA(PeftLoraLoaderMixinTests):
     def test_lora_scale_kwargs_match_fusion(self):
         super().test_lora_scale_kwargs_match_fusion(expected_atol=9e-3, expected_rtol=9e-3)

-    @parameterized.expand([("block_level", True), ("leaf_level", False)])
+    @pytest.mark.parametrize(
+        "offload_type, use_stream",
+        [
+            ("block_level", True),
+            ("leaf_level", False),
+            ("leaf_level", True),
+        ],
+    )
     @require_torch_accelerator
-    def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
+    def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname):
         # TODO: We don't run the (leaf_level, True) test here that is enabled for other models.
         # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
-        super()._test_group_offloading_inference_denoiser(offload_type, use_stream)
+        super()._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname)

-    @unittest.skip("Not supported in CogVideoX.")
+    @pytest.mark.skip("Not supported in CogVideoX.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in CogVideoX.")
+    @pytest.mark.skip("Not supported in CogVideoX.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in CogVideoX.")
+    @pytest.mark.skip("Not supported in CogVideoX.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass

-    @unittest.skip("Not supported in CogVideoX.")
+    @pytest.mark.skip("Not supported in CogVideoX.")
     def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
         pass
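The parametrization change above has an exact counterpart in plain pytest. A sketch of the two styles, with pytest's built-in tmp_path standing in for the suite's `tmpdirname` fixture (an assumption here; the real fixture is defined elsewhere in the suite):

import unittest

import pytest
from parameterized import parameterized


class OldStyleTests(unittest.TestCase):
    @parameterized.expand([("block_level", True), ("leaf_level", False)])
    def test_group_offloading(self, offload_type, use_stream):
        assert offload_type in {"block_level", "leaf_level"}


class TestNewStyle:
    @pytest.mark.parametrize(
        "offload_type, use_stream",
        [("block_level", True), ("leaf_level", False), ("leaf_level", True)],
    )
    def test_group_offloading(self, offload_type, use_stream, tmp_path):
        # tmp_path (like tmpdirname) is injected by pytest per test invocation
        assert offload_type in {"block_level", "leaf_level"}
        assert tmp_path.exists()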
@@ -13,11 +13,10 @@
 # limitations under the License.

 import sys
-import unittest

 import numpy as np
+import pytest
 import torch
-from parameterized import parameterized
 from transformers import AutoTokenizer, GlmModel

 from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
@@ -142,41 +141,56 @@ class TestCogView4LoRA(PeftLoraLoaderMixinTests):
             "Loading from saved checkpoints should give same results.",
         )

-    @parameterized.expand([("block_level", True), ("leaf_level", False)])
+    @pytest.mark.parametrize(
+        "offload_type, use_stream",
+        [
+            ("block_level", True),
+            ("leaf_level", False),
+            ("leaf_level", True),
+        ],
+    )
     @require_torch_accelerator
-    def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
+    def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname):
         # TODO: We don't run the (leaf_level, True) test here that is enabled for other models.
         # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
-        super()._test_group_offloading_inference_denoiser(offload_type, use_stream)
+        super()._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname)

-    @unittest.skip("Not supported in CogView4.")
+    @pytest.mark.skip("Not supported in CogView4.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in CogView4.")
+    @pytest.mark.skip("Not supported in CogView4.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in CogView4.")
+    @pytest.mark.skip("Not supported in CogView4.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogView4.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogView4.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogView4.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogView4.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogView4.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
@@ -22,12 +22,11 @@ import numpy as np
 import pytest
 import safetensors.torch
 import torch
 from parameterized import parameterized
 from PIL import Image
 from transformers import AutoTokenizer, CLIPTextModel, CLIPTokenizer, T5EncoderModel

 from diffusers import FlowMatchEulerDiscreteScheduler, FluxControlPipeline, FluxPipeline, FluxTransformer2DModel
-from diffusers.utils import load_image, logging
+from diffusers.utils import logging

 from ..testing_utils import (
     CaptureLogger,
@@ -169,9 +168,8 @@ class TestFluxLoRA(PeftLoraLoaderMixinTests):
         assert check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer"

         images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
-        assert not (
-            np.allclose(images_lora, base_pipe_output, atol=0.001, rtol=0.001),
-            "LoRA should lead to different results.",
-        )
+        assert not np.allclose(images_lora, base_pipe_output, atol=0.001, rtol=0.001), (
+            "LoRA should lead to different results."
+        )
         denoiser_state_dict = get_peft_model_state_dict(pipe.transformer)
         self.pipeline_class.save_lora_weights(tmpdirname, transformer_lora_layers=denoiser_state_dict)
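The assert rewrites in this hunk (and the many like it below) fix a real bug, not just formatting: wrapping the condition and the message in one set of parentheses builds a tuple, and a non-empty tuple is always truthy, so the condition is never actually evaluated by the assert. A small self-contained demonstration:

import numpy as np

base = np.zeros(4)
lora = np.ones(4)

# Old form: (condition, message) is a non-empty tuple, always truthy, so
# `assert not (...)` fails unconditionally regardless of the comparison.
try:
    assert not (np.allclose(lora, base), "LoRA should lead to different results.")
except AssertionError:
    print("raises even though lora and base really do differ")

# New form: the comparison is the assert condition; the parenthesized string
# is only the failure message, wrapped for line length.
assert not np.allclose(lora, base), (
    "LoRA should lead to different results."
)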
@@ -188,13 +186,11 @@ class TestFluxLoRA(PeftLoraLoaderMixinTests):
         assert check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer"

         images_lora_with_absent_keys = pipe(**inputs, generator=torch.manual_seed(0)).images
-        assert not (
-            np.allclose(images_lora, images_lora_with_absent_keys, atol=0.001, rtol=0.001),
-            "Different LoRAs should lead to different results.",
-        )
-        assert not (
-            np.allclose(base_pipe_output, images_lora_with_absent_keys, atol=0.001, rtol=0.001),
-            "LoRA should lead to different results.",
-        )
+        assert not np.allclose(images_lora, images_lora_with_absent_keys, atol=0.001, rtol=0.001), (
+            "Different LoRAs should lead to different results."
+        )
+        assert not np.allclose(base_pipe_output, images_lora_with_absent_keys, atol=0.001, rtol=0.001), (
+            "LoRA should lead to different results."
+        )

     def test_lora_expansion_works_for_extra_keys(self, base_pipe_output, tmpdirname):
@@ -210,10 +206,10 @@ class TestFluxLoRA(PeftLoraLoaderMixinTests):
         assert check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer"

         images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
-        assert not (
-            np.allclose(images_lora, base_pipe_output, atol=0.001, rtol=0.001),
-            "LoRA should lead to different results.",
-        )
+        assert not np.allclose(images_lora, base_pipe_output, atol=0.001, rtol=0.001), (
+            "LoRA should lead to different results."
+        )
+
         denoiser_state_dict = get_peft_model_state_dict(pipe.transformer)
         self.pipeline_class.save_lora_weights(tmpdirname, transformer_lora_layers=denoiser_state_dict)
         assert os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))
@@ -228,28 +224,30 @@ class TestFluxLoRA(PeftLoraLoaderMixinTests):
         assert check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer"

         images_lora_with_extra_keys = pipe(**inputs, generator=torch.manual_seed(0)).images
-        assert not (
-            np.allclose(images_lora, images_lora_with_extra_keys, atol=0.001, rtol=0.001),
-            "Different LoRAs should lead to different results.",
-        )
-        assert not (
-            np.allclose(base_pipe_output, images_lora_with_extra_keys, atol=0.001, rtol=0.001),
-            "LoRA should lead to different results.",
-        )
+        assert not np.allclose(images_lora, images_lora_with_extra_keys, atol=0.001, rtol=0.001), (
+            "Different LoRAs should lead to different results."
+        )
+        assert not np.allclose(base_pipe_output, images_lora_with_extra_keys, atol=0.001, rtol=0.001), (
+            "LoRA should lead to different results."
+        )

-    @unittest.skip("Not supported in Flux.")
+    @pytest.mark.skip("Not supported in Flux.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in Flux.")
+    @pytest.mark.skip("Not supported in Flux.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in Flux.")
+    @pytest.mark.skip("Not supported in Flux.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Not supported in Flux.")
+    @pytest.mark.skip("Not supported in Flux.")
     def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
         pass
@@ -355,9 +353,8 @@ class TestFluxControlLoRA(PeftLoraLoaderMixinTests):
             lora_unload_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
             assert pipe.transformer._transformer_norm_layers is None
             assert np.allclose(original_output, lora_unload_output, atol=1e-05, rtol=1e-05)
-            assert not (
-                np.allclose(original_output, lora_load_output, atol=1e-06, rtol=1e-06),
-                f"{norm_layer} is tested",
-            )
+            assert not np.allclose(original_output, lora_load_output, atol=1e-06, rtol=1e-06), (
+                f"{norm_layer} is tested"
+            )

         with CaptureLogger(logger) as cap_logger:
@@ -729,19 +726,23 @@ class TestFluxControlLoRA(PeftLoraLoaderMixinTests):
         assert pipe.transformer.x_embedder.weight.data.shape[1] == in_features * 2
         assert pipe.transformer.config.in_channels == in_features * 2

-    @unittest.skip("Not supported in Flux.")
+    @pytest.mark.skip("Not supported in Flux.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in Flux.")
+    @pytest.mark.skip("Not supported in Flux.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in Flux.")
+    @pytest.mark.skip("Not supported in Flux.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Not supported in Flux.")
+    @pytest.mark.skip("Not supported in Flux.")
     def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
         pass
@@ -872,89 +873,3 @@ class FluxLoRAIntegrationTests(unittest.TestCase):
         )
         max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)
         assert max_diff < 0.001
-
-
-@nightly
-@require_torch_accelerator
-@require_peft_backend
-@require_big_accelerator
-class FluxControlLoRAIntegrationTests(unittest.TestCase):
-    num_inference_steps = 10
-    seed = 0
-    prompt = "A robot made of exotic candies and chocolates of different kinds."
-
-    def setUp(self):
-        super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
-        self.pipeline = FluxControlPipeline.from_pretrained(
-            "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
-        ).to(torch_device)
-
-    def tearDown(self):
-        super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
-
-    @parameterized.expand(["black-forest-labs/FLUX.1-Canny-dev-lora", "black-forest-labs/FLUX.1-Depth-dev-lora"])
-    def test_lora(self, lora_ckpt_id):
-        self.pipeline.load_lora_weights(lora_ckpt_id)
-        self.pipeline.fuse_lora()
-        self.pipeline.unload_lora_weights()
-        if "Canny" in lora_ckpt_id:
-            control_image = load_image(
-                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/canny_condition_image.png"
-            )
-        else:
-            control_image = load_image(
-                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/depth_condition_image.png"
-            )
-        image = self.pipeline(
-            prompt=self.prompt,
-            control_image=control_image,
-            height=1024,
-            width=1024,
-            num_inference_steps=self.num_inference_steps,
-            guidance_scale=30.0 if "Canny" in lora_ckpt_id else 10.0,
-            output_type="np",
-            generator=torch.manual_seed(self.seed),
-        ).images
-        out_slice = image[0, -3:, -3:, -1].flatten()
-        if "Canny" in lora_ckpt_id:
-            expected_slice = np.array([0.8438, 0.8438, 0.8438, 0.8438, 0.8438, 0.8398, 0.8438, 0.8438, 0.8516])
-        else:
-            expected_slice = np.array([0.8203, 0.832, 0.8359, 0.8203, 0.8281, 0.8281, 0.8203, 0.8242, 0.8359])
-        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)
-        assert max_diff < 0.001
-
-    @parameterized.expand(["black-forest-labs/FLUX.1-Canny-dev-lora", "black-forest-labs/FLUX.1-Depth-dev-lora"])
-    def test_lora_with_turbo(self, lora_ckpt_id):
-        self.pipeline.load_lora_weights(lora_ckpt_id)
-        self.pipeline.load_lora_weights("ByteDance/Hyper-SD", weight_name="Hyper-FLUX.1-dev-8steps-lora.safetensors")
-        self.pipeline.fuse_lora()
-        self.pipeline.unload_lora_weights()
-        if "Canny" in lora_ckpt_id:
-            control_image = load_image(
-                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/canny_condition_image.png"
-            )
-        else:
-            control_image = load_image(
-                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/depth_condition_image.png"
-            )
-        image = self.pipeline(
-            prompt=self.prompt,
-            control_image=control_image,
-            height=1024,
-            width=1024,
-            num_inference_steps=self.num_inference_steps,
-            guidance_scale=30.0 if "Canny" in lora_ckpt_id else 10.0,
-            output_type="np",
-            generator=torch.manual_seed(self.seed),
-        ).images
-        out_slice = image[0, -3:, -3:, -1].flatten()
-        if "Canny" in lora_ckpt_id:
-            expected_slice = np.array([0.6562, 0.7266, 0.7578, 0.6367, 0.6758, 0.7031, 0.6172, 0.6602, 0.6484])
-        else:
-            expected_slice = np.array([0.668, 0.7344, 0.7656, 0.6484, 0.6875, 0.7109, 0.6328, 0.6719, 0.6562])
-        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)
-        assert max_diff < 0.001
@@ -17,6 +17,7 @@ import sys
 import unittest

 import numpy as np
+import pytest
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast

@@ -156,39 +157,48 @@ class TestHunyuanVideoLoRA(PeftLoraLoaderMixinTests):
         super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

     # TODO(aryan): Fix the following test
-    @unittest.skip("This test fails with an error I haven't been able to debug yet.")
+    @pytest.mark.skip("This test fails with an error I haven't been able to debug yet.")
     def test_simple_inference_save_pretrained(self):
         pass

-    @unittest.skip("Not supported in HunyuanVideo.")
+    @pytest.mark.skip("Not supported in HunyuanVideo.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in HunyuanVideo.")
+    @pytest.mark.skip("Not supported in HunyuanVideo.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in HunyuanVideo.")
+    @pytest.mark.skip("Not supported in HunyuanVideo.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in HunyuanVideo.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
@@ -13,8 +13,8 @@
 # limitations under the License.

 import sys
-import unittest

+import pytest
 import torch
 from transformers import AutoTokenizer, T5EncoderModel

@@ -114,34 +114,42 @@ class TestLTXVideoLoRA(PeftLoraLoaderMixinTests):
     def test_simple_inference_with_text_denoiser_lora_unfused(self):
         super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

-    @unittest.skip("Not supported in LTXVideo.")
+    @pytest.mark.skip("Not supported in LTXVideo.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in LTXVideo.")
+    @pytest.mark.skip("Not supported in LTXVideo.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in LTXVideo.")
+    @pytest.mark.skip("Not supported in LTXVideo.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in LTXVideo.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
@@ -13,7 +13,6 @@
 # limitations under the License.

 import sys
-import unittest

 import numpy as np
 import pytest
@@ -101,35 +100,43 @@ class TestLumina2LoRA(PeftLoraLoaderMixinTests):

         return noise, input_ids, pipeline_inputs

-    @unittest.skip("Not supported in Lumina2.")
+    @pytest.mark.skip("Not supported in Lumina2.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in Lumina2.")
+    @pytest.mark.skip("Not supported in Lumina2.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in Lumina2.")
+    @pytest.mark.skip("Not supported in Lumina2.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Lumina2.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
@@ -13,8 +13,8 @@
 # limitations under the License.

 import sys
-import unittest

+import pytest
 import torch
 from transformers import AutoTokenizer, T5EncoderModel

@@ -105,38 +105,47 @@ class TestMochiLoRA(PeftLoraLoaderMixinTests):
     def test_simple_inference_with_text_denoiser_lora_unfused(self):
         super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

-    @unittest.skip("Not supported in Mochi.")
+    @pytest.mark.skip("Not supported in Mochi.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in Mochi.")
+    @pytest.mark.skip("Not supported in Mochi.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in Mochi.")
+    @pytest.mark.skip("Not supported in Mochi.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Mochi.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Mochi.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Mochi.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Mochi.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Mochi.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass

-    @unittest.skip("Not supported in CogVideoX.")
+    @pytest.mark.skip("Not supported in CogVideoX.")
     def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
         pass
@@ -13,8 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import sys
-import unittest

+import pytest
 import torch
 from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer

@@ -96,34 +96,42 @@ class TestQwenImageLoRA(PeftLoraLoaderMixinTests):

         return noise, input_ids, pipeline_inputs

-    @unittest.skip("Not supported in Qwen Image.")
+    @pytest.mark.skip("Not supported in Qwen Image.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in Qwen Image.")
+    @pytest.mark.skip("Not supported in Qwen Image.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in Qwen Image.")
+    @pytest.mark.skip("Not supported in Qwen Image.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Qwen Image.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Qwen Image.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Qwen Image.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Qwen Image.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Qwen Image.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Qwen Image.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
@@ -13,8 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import sys
-import unittest

+import pytest
 import torch
 from transformers import Gemma2Model, GemmaTokenizer

@@ -105,34 +105,42 @@ class TestSanaLoRA(PeftLoraLoaderMixinTests):

         return noise, input_ids, pipeline_inputs

-    @unittest.skip("Not supported in SANA.")
+    @pytest.mark.skip("Not supported in SANA.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Not supported in SANA.")
+    @pytest.mark.skip("Not supported in SANA.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in SANA.")
+    @pytest.mark.skip("Not supported in SANA.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in SANA.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in SANA.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in SANA.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in SANA.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in SANA.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in SANA.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in SANA.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in SANA.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in SANA.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in SANA.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
@@ -17,6 +17,7 @@ import sys
 import unittest

 import numpy as np
+import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

@@ -113,19 +114,23 @@ class TestSD3LoRA(PeftLoraLoaderMixinTests):
         lora_filename = "lora_peft_format.safetensors"
         pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)

-    @unittest.skip("Not supported in SD3.")
+    @pytest.mark.skip("Not supported in SD3.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in SD3.")
+    @pytest.mark.skip("Not supported in SD3.")
     def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
         pass

-    @unittest.skip("Not supported in SD3.")
+    @pytest.mark.skip("Not supported in SD3.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in SD3.")
+    @pytest.mark.skip("Not supported in SD3.")
     def test_modify_padding_mode(self):
         pass
@@ -13,8 +13,8 @@
 # limitations under the License.

 import sys
-import unittest

+import pytest
 import torch
 from transformers import AutoTokenizer, T5EncoderModel

@@ -110,34 +110,42 @@ class TestWanLoRA(PeftLoraLoaderMixinTests):
     def test_simple_inference_with_text_denoiser_lora_unfused(self):
         super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

-    @unittest.skip("Not supported in Wan.")
+    @pytest.mark.skip("Not supported in Wan.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in Wan.")
+    @pytest.mark.skip("Not supported in Wan.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in Wan.")
+    @pytest.mark.skip("Not supported in Wan.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Wan.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Wan.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Wan.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Wan.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Wan.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Wan.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Wan.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Wan.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Wan.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Wan.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
@@ -14,9 +14,9 @@

 import os
 import sys
-import unittest

 import numpy as np
+import pytest
 import safetensors.torch
 import torch
 from PIL import Image
@@ -126,35 +126,43 @@ class TestWanVACELoRA(PeftLoraLoaderMixinTests):
     def test_simple_inference_with_text_denoiser_lora_unfused(self):
         super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

-    @unittest.skip("Not supported in Wan VACE.")
+    @pytest.mark.skip("Not supported in Wan VACE.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

-    @unittest.skip("Not supported in Wan VACE.")
+    @pytest.mark.skip("Not supported in Wan VACE.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass

-    @unittest.skip("Not supported in Wan VACE.")
+    @pytest.mark.skip("Not supported in Wan VACE.")
     def test_modify_padding_mode(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Wan VACE.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.")
     def test_simple_inference_with_partial_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Wan VACE.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.")
     def test_simple_inference_with_text_lora(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Wan VACE.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Wan VACE.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.")
     def test_simple_inference_with_text_lora_fused(self):
         pass

-    @unittest.skip("Text encoder LoRA is not supported in Wan VACE.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in Wan VACE.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
@@ -20,7 +20,6 @@ from itertools import product
 import numpy as np
 import pytest
 import torch
-from parameterized import parameterized

 from diffusers import AutoencoderKL, UNet2DConditionModel
 from diffusers.utils import logging
@@ -243,19 +242,19 @@ class PeftLoraLoaderMixinTests:
         if "text_encoder" in self.pipeline_class._lora_loadable_modules:
             pipe.text_encoder.add_adapter(text_lora_config, adapter_name=adapter_name)
             assert check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder"

         if denoiser_lora_config is not None:
             denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
             denoiser.add_adapter(denoiser_lora_config, adapter_name=adapter_name)
             assert check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser."
         else:
             denoiser = None

         if text_lora_config is not None and self.has_two_text_encoders or self.has_three_text_encoders:
             if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                 pipe.text_encoder_2.add_adapter(text_lora_config, adapter_name=adapter_name)
                 assert check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"

         return pipe, denoiser

     def _compute_baseline_output(self):
@@ -468,9 +467,8 @@ class PeftLoraLoaderMixinTests:
                 assert check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"

         ouput_fused = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        assert not (
-            np.allclose(ouput_fused, base_pipe_output, atol=1e-3, rtol=1e-3),
-            "Fused lora should change the output",
-        )
+        assert not np.allclose(ouput_fused, base_pipe_output, atol=1e-3, rtol=1e-3), (
+            "Fused lora should change the output"
+        )

     def test_simple_inference_with_text_lora_unloaded(self, base_pipe_output):
@@ -491,7 +489,9 @@ class PeftLoraLoaderMixinTests:

         if self.has_two_text_encoders or self.has_three_text_encoders:
             if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
-                assert not check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly unloaded in text encoder 2"
+                assert not check_if_lora_correctly_set(pipe.text_encoder_2), (
+                    "Lora not correctly unloaded in text encoder 2"
+                )

         ouput_unloaded = pipe(**inputs, generator=torch.manual_seed(0))[0]
         assert np.allclose(ouput_unloaded, base_pipe_output, atol=1e-3, rtol=1e-3), (
@@ -690,9 +690,8 @@ class PeftLoraLoaderMixinTests:
                 assert check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"

         output_fused = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        assert not (
-            np.allclose(output_fused, base_pipe_output, atol=1e-3, rtol=1e-3),
-            "Fused lora should change the output",
-        )
+        assert not np.allclose(output_fused, base_pipe_output, atol=1e-3, rtol=1e-3), (
+            "Fused lora should change the output"
+        )

     def test_simple_inference_with_text_denoiser_lora_unloaded(self, base_pipe_output):
@@ -714,9 +713,8 @@ class PeftLoraLoaderMixinTests:

         if self.has_two_text_encoders or self.has_three_text_encoders:
             if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
-                assert not (
-                    check_if_lora_correctly_set(pipe.text_encoder_2),
-                    "Lora not correctly unloaded in text encoder 2",
-                )
+                assert not check_if_lora_correctly_set(pipe.text_encoder_2), (
+                    "Lora not correctly unloaded in text encoder 2"
+                )

         output_unloaded = pipe(**inputs, generator=torch.manual_seed(0))[0]
@@ -791,36 +789,30 @@ class PeftLoraLoaderMixinTests:

         pipe.set_adapters("adapter-1")
         output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        assert not (
-            np.allclose(base_pipe_output, output_adapter_1, atol=1e-3, rtol=1e-3),
-            "Adapter outputs should be different.",
-        )
+        assert not np.allclose(base_pipe_output, output_adapter_1, atol=1e-3, rtol=1e-3), (
+            "Adapter outputs should be different."
+        )

         pipe.set_adapters("adapter-2")
         output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        assert not (
-            np.allclose(base_pipe_output, output_adapter_2, atol=1e-3, rtol=1e-3),
-            "Adapter outputs should be different.",
-        )
+        assert not np.allclose(base_pipe_output, output_adapter_2, atol=1e-3, rtol=1e-3), (
+            "Adapter outputs should be different."
+        )

         pipe.set_adapters(["adapter-1", "adapter-2"])
         output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        assert not (
-            np.allclose(base_pipe_output, output_adapter_mixed, atol=1e-3, rtol=1e-3),
-            "Adapter outputs should be different.",
-        )
+        assert not np.allclose(base_pipe_output, output_adapter_mixed, atol=1e-3, rtol=1e-3), (
+            "Adapter outputs should be different."
+        )

-        assert not (
-            np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
-            "Adapter 1 and 2 should give different results",
-        )
+        assert not np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), (
+            "Adapter 1 and 2 should give different results"
+        )
-        assert not (
-            np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
-            "Adapter 1 and mixed adapters should give different results",
-        )
+        assert not np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), (
+            "Adapter 1 and mixed adapters should give different results"
+        )
-        assert not (
-            np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
-            "Adapter 2 and mixed adapters should give different results",
-        )
+        assert not np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), (
+            "Adapter 2 and mixed adapters should give different results"
+        )

         pipe.disable_lora()
@@ -902,17 +894,15 @@ class PeftLoraLoaderMixinTests:
         weights_2 = {"unet": {"up": 5}}
         pipe.set_adapters("adapter-1", weights_2)
         output_weights_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        assert not (
-            np.allclose(output_weights_1, output_weights_2, atol=1e-3, rtol=1e-3),
-            "LoRA weights 1 and 2 should give different results",
-        )
+        assert not np.allclose(output_weights_1, output_weights_2, atol=1e-3, rtol=1e-3), (
+            "LoRA weights 1 and 2 should give different results"
+        )
-        assert not (
-            np.allclose(base_pipe_output, output_weights_1, atol=1e-3, rtol=1e-3),
-            "No adapter and LoRA weights 1 should give different results",
-        )
+        assert not np.allclose(base_pipe_output, output_weights_1, atol=1e-3, rtol=1e-3), (
+            "No adapter and LoRA weights 1 should give different results"
+        )
-        assert not (
-            np.allclose(base_pipe_output, output_weights_2, atol=1e-3, rtol=1e-3),
-            "No adapter and LoRA weights 2 should give different results",
-        )
+        assert not np.allclose(base_pipe_output, output_weights_2, atol=1e-3, rtol=1e-3), (
+            "No adapter and LoRA weights 2 should give different results"
+        )

         pipe.disable_lora()
@@ -952,21 +942,21 @@ class PeftLoraLoaderMixinTests:
         scales_2 = {"unet": {"down": 5, "mid": 5}}
         pipe.set_adapters("adapter-1", scales_1)
         output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]

         pipe.set_adapters("adapter-2", scales_2)
         output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]

         pipe.set_adapters(["adapter-1", "adapter-2"], [scales_1, scales_2])
         output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        assert not (
-            np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
-            "Adapter 1 and 2 should give different results",
-        )
+        assert not np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), (
+            "Adapter 1 and 2 should give different results"
+        )
-        assert not (
-            np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
-            "Adapter 1 and mixed adapters should give different results",
-        )
+        assert not np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), (
+            "Adapter 1 and mixed adapters should give different results"
+        )
-        assert not (
-            np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
-            "Adapter 2 and mixed adapters should give different results",
-        )
+        assert not np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), (
+            "Adapter 2 and mixed adapters should give different results"
+        )

         pipe.disable_lora()
@@ -1093,17 +1083,14 @@ class PeftLoraLoaderMixinTests:
         pipe.set_adapters(["adapter-1", "adapter-2"])
         output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0]

-        assert not (
-            np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
-            "Adapter 1 and 2 should give different results",
-        )
+        assert not np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), (
+            "Adapter 1 and 2 should give different results"
+        )
-        assert not (
-            np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
-            "Adapter 1 and mixed adapters should give different results",
-        )
+        assert not np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), (
+            "Adapter 1 and mixed adapters should give different results"
+        )
-        assert not (
-            np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
-            "Adapter 2 and mixed adapters should give different results",
-        )
+        assert not np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), (
+            "Adapter 2 and mixed adapters should give different results"
+        )

         pipe.delete_adapters("adapter-1")
@@ -1171,24 +1158,20 @@ class PeftLoraLoaderMixinTests:

         pipe.set_adapters(["adapter-1", "adapter-2"])
         output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        assert not (
-            np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
-            "Adapter 1 and 2 should give different results",
-        )
+        assert not np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), (
+            "Adapter 1 and 2 should give different results"
+        )
-        assert not (
-            np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
-            "Adapter 1 and mixed adapters should give different results",
-        )
+        assert not np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), (
+            "Adapter 1 and mixed adapters should give different results"
+        )
-        assert not (
-            np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
-            "Adapter 2 and mixed adapters should give different results",
-        )
+        assert not np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), (
+            "Adapter 2 and mixed adapters should give different results"
+        )

         pipe.set_adapters(["adapter-1", "adapter-2"], [0.5, 0.6])
         output_adapter_mixed_weighted = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        assert not (
-            np.allclose(output_adapter_mixed_weighted, output_adapter_mixed, atol=1e-3, rtol=1e-3),
-            "Weighted adapter and mixed adapter should give different results",
-        )
+        assert not np.allclose(output_adapter_mixed_weighted, output_adapter_mixed, atol=1e-3, rtol=1e-3), (
+            "Weighted adapter and mixed adapter should give different results"
+        )

         pipe.disable_lora()
@@ -1456,9 +1439,8 @@ class PeftLoraLoaderMixinTests:
         assert np.allclose(outputs_lora_1, outputs_lora_1_fused, atol=expected_atol, rtol=expected_rtol), (
             "Fused lora should not change the output"
         )
-        assert not (
-            np.allclose(base_pipe_output, outputs_lora_1, atol=expected_atol, rtol=expected_rtol),
-            "LoRA should change the output",
-        )
+        assert not np.allclose(base_pipe_output, outputs_lora_1, atol=expected_atol, rtol=expected_rtol), (
+            "LoRA should change the output"
+        )

     def test_simple_inference_with_dora(self):
@@ -1474,9 +1456,8 @@ class PeftLoraLoaderMixinTests:

         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)
         output_dora_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        assert not (
-            np.allclose(output_dora_lora, output_no_dora_lora, atol=1e-3, rtol=1e-3),
-            "DoRA lora should change the output",
-        )
+        assert not np.allclose(output_dora_lora, output_no_dora_lora, atol=1e-3, rtol=1e-3), (
+            "DoRA lora should change the output"
+        )

     def test_missing_keys_warning(self, tmpdirname):
@@ -1504,7 +1485,7 @@ class PeftLoraLoaderMixinTests:
             pipe.load_lora_weights(state_dict)

         component = list({k.split(".")[0] for k in state_dict})[0]
-        assert missing_key.replace(f"{component}.", "" in cap_logger.out.replace("default_0.", ""))
+        assert missing_key.replace(f"{component}.", "") in cap_logger.out.replace("default_0.", "")

     def test_unexpected_keys_warning(self, tmpdirname):
         components, _, denoiser_lora_config = self.get_dummy_components()
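The one-character fix above moves a closing parenthesis: in the old line, the `in` test was evaluated inside `str.replace()`, handing it a boolean instead of a replacement string. A standalone reproduction:

missing_key = "transformer.blocks.0.lora_A.weight"
component = "transformer"
log_out = "blocks.0.lora_A.weight was not found"

# Old: `"" in log_out.replace(...)` evaluates first and is always True, so
# str.replace() receives a bool as its second argument and raises TypeError.
try:
    missing_key.replace(f"{component}.", "" in log_out.replace("default_0.", ""))
except TypeError as exc:
    print(exc)  # replace() argument 2 must be str, not bool

# New: strip the component prefix, then check membership in the captured log.
assert missing_key.replace(f"{component}.", "") in log_out.replace("default_0.", "")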
@@ -1618,9 +1599,8 @@ class PeftLoraLoaderMixinTests:
         lora_scale = 0.5
         attention_kwargs = {attention_kwargs_name: {"scale": lora_scale}}
         output_lora_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0]
-        assert not (
-            np.allclose(base_pipe_output, output_lora_scale, atol=1e-3, rtol=1e-3),
-            "Lora + scale should change the output",
-        )
+        assert not np.allclose(base_pipe_output, output_lora_scale, atol=1e-3, rtol=1e-3), (
+            "Lora + scale should change the output"
+        )

         pipe.set_adapters("default", lora_scale)
@@ -1763,9 +1743,9 @@ class PeftLoraLoaderMixinTests:
                 if "lora" in name or any((re.search(pattern, name) for pattern in patterns_to_check)):
                     dtype_to_check = compute_dtype
                 if getattr(submodule, "weight", None) is not None:
-                    self.assertEqual(submodule.weight.dtype, dtype_to_check)
+                    assert submodule.weight.dtype == dtype_to_check
                 if getattr(submodule, "bias", None) is not None:
-                    self.assertEqual(submodule.bias.dtype, dtype_to_check)
+                    assert submodule.bias.dtype == dtype_to_check

         def initialize_pipeline(storage_dtype=None, compute_dtype=torch.float32):
             components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
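Dropping `self.assertEqual` in favor of a bare `assert` is safe under pytest, whose assertion rewriting still reports both operands on failure, and it frees the helper from needing a `unittest.TestCase` instance. A minimal illustration:

import torch

submodule = torch.nn.Linear(2, 2).to(torch.float16)
dtype_to_check = torch.float16

# Equivalent to self.assertEqual(submodule.weight.dtype, dtype_to_check), but
# usable in any plain function; pytest prints both dtypes if this ever fails.
assert submodule.weight.dtype == dtype_to_check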
@@ -1871,7 +1851,7 @@ class PeftLoraLoaderMixinTests:
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
         pipe(**inputs, generator=torch.manual_seed(0))[0]

-    @parameterized.expand([4, 8, 16])
+    @pytest.mark.parametrize("lora_alpha", [4, 8, 16])
     def test_lora_adapter_metadata_is_loaded_correctly(self, lora_alpha, tmpdirname):
         components, text_lora_config, denoiser_lora_config = self.get_dummy_components(lora_alpha=lora_alpha)
         pipe = self.pipeline_class(**components)
@@ -1914,7 +1894,7 @@ class PeftLoraLoaderMixinTests:
             parsed_metadata=parsed_metadata, lora_metadatas=lora_metadatas, module_key=text_encoder_2_key
         )

-    @parameterized.expand([4, 8, 16])
+    @pytest.mark.parametrize("lora_alpha", [4, 8, 16])
     def test_lora_adapter_metadata_save_load_inference(self, lora_alpha, tmpdirname):
         components, text_lora_config, denoiser_lora_config = self.get_dummy_components(lora_alpha=lora_alpha)
         pipe = self.pipeline_class(**components).to(torch_device)
@@ -2047,7 +2027,14 @@ class PeftLoraLoaderMixinTests:
         output_3 = pipe(**inputs, generator=torch.manual_seed(0))[0]
         assert np.allclose(output_1, output_3, atol=1e-3, rtol=1e-3)

-    @parameterized.expand([("block_level", True), ("leaf_level", False), ("leaf_level", True)])
+    @pytest.mark.parametrize(
+        "offload_type, use_stream",
+        [
+            ("block_level", True),
+            ("leaf_level", False),
+            ("leaf_level", True),
+        ],
+    )
     @require_torch_accelerator
     def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname):
         for cls in inspect.getmro(self.__class__):
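`tmpdirname` now arrives as a test argument, i.e. a pytest fixture resolved by name. The fixture's own definition isn't shown in this diff; a minimal stand-in built on pytest's stock `tmp_path` fixture (a hypothetical sketch, not the suite's actual code) would look like this:

import os

import pytest


@pytest.fixture
def tmpdirname(tmp_path):
    # Hypothetical stand-in: expose the per-test temporary directory as a str.
    return str(tmp_path)


def test_saves_lora_checkpoint(tmpdirname):
    path = os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")
    open(path, "wb").close()
    assert os.path.isfile(path)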
@@ -2055,7 +2042,7 @@ class PeftLoraLoaderMixinTests:
                 return
         self._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname)

-    @require_torch_accelerator
+    @pytest.mark.skipif(torch_device == "cpu", reason="test requires accelerator+PyTorch")
     def test_lora_loading_model_cpu_offload(self, tmpdirname):
         components, _, denoiser_lora_config = self.get_dummy_components()
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
@@ -24,6 +24,7 @@ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tupl
 import numpy as np
 import PIL.Image
 import PIL.ImageOps
+import pytest
 import requests
 from numpy.linalg import norm
 from packaging import version
@@ -275,7 +276,7 @@ def nightly(test_case):
     Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.

     """
-    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
+    return pytest.mark.skipif(not _run_nightly_tests, reason="test is nightly")(test_case)


 def is_torch_compile(test_case):
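All of the decorator conversions in this file follow one identity: `unittest.skipUnless(cond, msg)` runs the test when `cond` holds, which is `pytest.mark.skipif(not cond, reason=msg)` in pytest terms. A hedged sketch with a stand-in condition:

import unittest

import pytest

_run_nightly_tests = False  # stand-in for the real environment check


def nightly_unittest(test_case):
    # Old: run only when nightly tests are enabled.
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)


def nightly_pytest(test_case):
    # New: skip when nightly tests are disabled -- same behaviour, inverted condition.
    return pytest.mark.skipif(not _run_nightly_tests, reason="test is nightly")(test_case)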
@@ -350,9 +351,9 @@ def require_torch_cuda_compatibility(expected_compute_capability):
 # These decorators are for accelerator-specific behaviours that are not GPU-specific
 def require_torch_accelerator(test_case):
     """Decorator marking a test that requires an accelerator backend and PyTorch."""
-    return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")(
-        test_case
-    )
+    return pytest.mark.skipif(
+        not (is_torch_available() and torch_device != "cpu"), reason="test requires accelerator+PyTorch"
+    )(test_case)


 def require_torch_multi_gpu(test_case):
@@ -441,9 +442,9 @@ def require_big_accelerator(test_case):
     device_properties = torch.cuda.get_device_properties(0)

     total_memory = device_properties.total_memory / (1024**3)
-    return unittest.skipUnless(
-        total_memory >= BIG_GPU_MEMORY,
-        f"test requires a hardware accelerator with at least {BIG_GPU_MEMORY} GB memory",
-    )(test_case)
+    return pytest.mark.skipif(
+        not total_memory >= BIG_GPU_MEMORY,
+        reason=f"test requires a hardware accelerator with at least {BIG_GPU_MEMORY} GB memory",
+    )(test_case)
@@ -509,7 +510,7 @@ def require_peft_backend(test_case):
     Decorator marking a test that requires PEFT backend, this would require some specific versions of PEFT and
     transformers.
     """
-    return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case)
+    return pytest.mark.skipif(not USE_PEFT_BACKEND, reason="test requires PEFT backend")(test_case)


 def require_timm(test_case):
@@ -550,8 +551,8 @@ def require_peft_version_greater(peft_version):
         correct_peft_version = is_peft_available() and version.parse(
             version.parse(importlib.metadata.version("peft")).base_version
         ) > version.parse(peft_version)
-        return unittest.skipUnless(
-            correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}"
-        )(test_case)
+        return pytest.mark.skipif(
+            not correct_peft_version, reason=f"test requires PEFT backend with the version greater than {peft_version}"
+        )(test_case)

     return decorator
@@ -567,9 +568,9 @@ def require_transformers_version_greater(transformers_version):
         correct_transformers_version = is_transformers_available() and version.parse(
             version.parse(importlib.metadata.version("transformers")).base_version
         ) > version.parse(transformers_version)
-        return unittest.skipUnless(
-            correct_transformers_version,
-            f"test requires transformers with the version greater than {transformers_version}",
-        )(test_case)
+        return pytest.mark.skipif(
+            not correct_transformers_version,
+            reason=f"test requires transformers with the version greater than {transformers_version}",
+        )(test_case)

     return decorator