mirror of https://github.com/huggingface/diffusers.git (synced 2026-01-27 17:22:53 +03:00)
@@ -136,9 +136,8 @@ class TestCogView4LoRA(PeftLoraLoaderMixinTests):
         images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0]

-        self.assertTrue(
-            np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3),
-            "Loading from saved checkpoints should give same results.",
-        )
+        assert np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3), (
+            "Loading from saved checkpoints should give same results."
+        )

     @pytest.mark.parametrize(
@@ -119,7 +119,7 @@ class TestFluxLoRA(PeftLoraLoaderMixinTests):
         _, _, inputs = self.get_dummy_inputs(with_generator=False)

         pipe.transformer.add_adapter(denoiser_lora_config)
-        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")
+        assert check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer"

         images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
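
The two hunks above show the conversion pattern applied throughout this commit: unittest's self.assertTrue(cond, msg) becomes a bare assert cond, msg, with long failure messages wrapped in parentheses to satisfy the line-length limit. A minimal self-contained sketch of both forms (hypothetical test, not from this commit):

import numpy as np

def test_roundtrip_matches():
    a = np.zeros(4)
    b = np.zeros(4)
    # short form: assert <condition>, <message>
    assert np.allclose(a, b, atol=1e-3), "arrays should match"
    # long form: the message is parenthesized so the statement can span lines
    assert np.allclose(a, b, atol=1e-3, rtol=1e-3), (
        "Loading from saved checkpoints should give same results."
    )

The message expression is only evaluated and shown when the assertion fails, so the bare assert keeps the same failure report as assertTrue while reading as ordinary Python.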
@@ -14,7 +14,6 @@

 import gc
 import sys
-import unittest

 import numpy as np
 import pytest
@@ -38,7 +37,6 @@ from ..testing_utils import (
     require_peft_backend,
     require_torch_accelerator,
     skip_mps,
     torch_device,
 )
@@ -207,7 +205,7 @@ class TestHunyuanVideoLoRA(PeftLoraLoaderMixinTests):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-class HunyuanVideoLoRAIntegrationTests(unittest.TestCase):
+class TestHunyuanVideoLoRAIntegration:
     """internal note: The integration slices were obtained on DGX.

     torch: 2.5.1+cu124 with CUDA 12.5. Need the same setup for the
@@ -217,9 +215,8 @@ class HunyuanVideoLoRAIntegrationTests(unittest.TestCase):
     num_inference_steps = 10
     seed = 0

-    def setUp(self):
-        super().setUp()
+    @pytest.fixture(scope="function")
+    def pipeline(self, torch_device):
         gc.collect()
         backend_empty_cache(torch_device)
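
The rename from HunyuanVideoLoRAIntegrationTests(unittest.TestCase) to TestHunyuanVideoLoRAIntegration works because pytest collects any class whose name starts with Test (and that defines no __init__), no base class required; setUp gives way to a fixture that each test requests by argument name. A minimal sketch of that collection rule (hypothetical names, not this test suite):

import pytest

class TestExample:  # collected by pytest purely because of the Test* prefix
    @pytest.fixture
    def resource(self):
        return {"ready": True}  # state that setUp() used to attach to self

    def test_uses_fixture(self, resource):  # injected by matching the argument name
        assert resource["ready"]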
@@ -227,27 +224,27 @@ class HunyuanVideoLoRAIntegrationTests(unittest.TestCase):
         transformer = HunyuanVideoTransformer3DModel.from_pretrained(
             model_id, subfolder="transformer", torch_dtype=torch.bfloat16
         )
-        self.pipeline = HunyuanVideoPipeline.from_pretrained(
-            model_id, transformer=transformer, torch_dtype=torch.float16
-        ).to(torch_device)
+        pipe = HunyuanVideoPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.float16).to(
+            torch_device
+        )
+        try:
+            yield pipe
+        finally:
+            del pipe
+            gc.collect()
+            backend_empty_cache(torch_device)

-    def tearDown(self):
-        super().tearDown()
-
-        gc.collect()
-        backend_empty_cache(torch_device)
-
-    def test_original_format_cseti(self):
-        self.pipeline.load_lora_weights(
+    def test_original_format_cseti(self, pipeline):
+        pipeline.load_lora_weights(
             "Cseti/HunyuanVideo-LoRA-Arcane_Jinx-v1", weight_name="csetiarcane-nfjinx-v1-6000.safetensors"
         )
-        self.pipeline.fuse_lora()
-        self.pipeline.unload_lora_weights()
-        self.pipeline.vae.enable_tiling()
+        pipeline.fuse_lora()
+        pipeline.unload_lora_weights()
+        pipeline.vae.enable_tiling()

         prompt = "CSETIARCANE. A cat walks on the grass, realistic"

-        out = self.pipeline(
+        out = pipeline(
             prompt=prompt,
             height=320,
             width=512,
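
The pipeline fixture above constructs the model, yields it to the test, and frees it in a finally block, replacing the old setUp/tearDown pair while guaranteeing cleanup even when the test raises. A minimal standalone sketch of the same yield-with-cleanup shape (a dict stands in for the expensive pipeline object):

import gc
import pytest

@pytest.fixture(scope="function")
def resource():
    handle = {"open": True}  # stand-in for an expensive object such as a pipeline
    try:
        yield handle  # the test body runs at this point
    finally:
        handle["open"] = False  # teardown runs even if the test fails
        del handle
        gc.collect()

def test_resource_is_open(resource):
    assert resource["open"]

scope="function" (the default) rebuilds the resource for every test, matching the per-test setUp/tearDown semantics being replaced.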
@@ -155,11 +155,11 @@ class TestLumina2LoRA(PeftLoraLoaderMixinTests):

         if "text_encoder" in self.pipeline_class._lora_loadable_modules:
             pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
-            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
+            assert check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder"

         denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
         denoiser.add_adapter(denoiser_lora_config, "adapter-1")
-        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")
+        assert check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser."

         # corrupt one LoRA weight with `inf` values
         with torch.no_grad():
@@ -173,4 +173,4 @@ class TestLumina2LoRA(PeftLoraLoaderMixinTests):
         pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False)
         out = pipe(**inputs)[0]

-        self.assertTrue(np.isnan(out).all())
+        assert np.isnan(out).all()
@@ -14,9 +14,9 @@
 # limitations under the License.
 import gc
 import sys
-import unittest

 import numpy as np
+import pytest
 import torch
 import torch.nn as nn
 from huggingface_hub import hf_hub_download
@@ -91,16 +91,6 @@ class TestStableDiffusionLoRA(PeftLoraLoaderMixinTests):
     def output_shape(self):
         return (1, 64, 64, 3)

-    def setUp(self):
-        super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
-
-    def tearDown(self):
-        super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
-
     # Keeping this test here makes sense because it doesn't look any integration
     # (value assertions on logits).
     @slow
@@ -114,15 +104,8 @@ class TestStableDiffusionLoRA(PeftLoraLoaderMixinTests):
         pipe.load_lora_weights(lora_id, adapter_name="adapter-2")
         pipe = pipe.to(torch_device)

-        self.assertTrue(
-            check_if_lora_correctly_set(pipe.text_encoder),
-            "Lora not correctly set in text encoder",
-        )
-
-        self.assertTrue(
-            check_if_lora_correctly_set(pipe.unet),
-            "Lora not correctly set in unet",
-        )
+        assert check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder"
+        assert check_if_lora_correctly_set(pipe.unet), "Lora not correctly set in unet"

         # We will offload the first adapter in CPU and check if the offloading
         # has been performed correctly
@@ -130,35 +113,35 @@ class TestStableDiffusionLoRA(PeftLoraLoaderMixinTests):

         for name, module in pipe.unet.named_modules():
             if "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
-                self.assertTrue(module.weight.device == torch.device("cpu"))
+                assert module.weight.device == torch.device("cpu")
             elif "adapter-2" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
-                self.assertTrue(module.weight.device != torch.device("cpu"))
+                assert module.weight.device != torch.device("cpu")

         for name, module in pipe.text_encoder.named_modules():
             if "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
-                self.assertTrue(module.weight.device == torch.device("cpu"))
+                assert module.weight.device == torch.device("cpu")
             elif "adapter-2" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
-                self.assertTrue(module.weight.device != torch.device("cpu"))
+                assert module.weight.device != torch.device("cpu")

         pipe.set_lora_device(["adapter-1"], 0)

         for n, m in pipe.unet.named_modules():
             if "adapter-1" in n and not isinstance(m, (nn.Dropout, nn.Identity)):
-                self.assertTrue(m.weight.device != torch.device("cpu"))
+                assert m.weight.device != torch.device("cpu")

         for n, m in pipe.text_encoder.named_modules():
             if "adapter-1" in n and not isinstance(m, (nn.Dropout, nn.Identity)):
-                self.assertTrue(m.weight.device != torch.device("cpu"))
+                assert m.weight.device != torch.device("cpu")

         pipe.set_lora_device(["adapter-1", "adapter-2"], torch_device)

         for n, m in pipe.unet.named_modules():
             if ("adapter-1" in n or "adapter-2" in n) and not isinstance(m, (nn.Dropout, nn.Identity)):
-                self.assertTrue(m.weight.device != torch.device("cpu"))
+                assert m.weight.device != torch.device("cpu")

         for n, m in pipe.text_encoder.named_modules():
             if ("adapter-1" in n or "adapter-2" in n) and not isinstance(m, (nn.Dropout, nn.Identity)):
-                self.assertTrue(m.weight.device != torch.device("cpu"))
+                assert m.weight.device != torch.device("cpu")

     @slow
     @require_torch_accelerator
@@ -181,15 +164,9 @@ class TestStableDiffusionLoRA(PeftLoraLoaderMixinTests):
         pipe.unet.add_adapter(unet_lora_config, "adapter-1")
         pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")

-        self.assertTrue(
-            check_if_lora_correctly_set(pipe.text_encoder),
-            "Lora not correctly set in text encoder",
-        )
+        assert check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder"

-        self.assertTrue(
-            check_if_lora_correctly_set(pipe.unet),
-            "Lora not correctly set in unet",
-        )
+        assert check_if_lora_correctly_set(pipe.unet), "Lora not correctly set in unet"

         for name, param in pipe.unet.named_parameters():
             if "lora_" in name:
@@ -225,17 +202,14 @@ class TestStableDiffusionLoRA(PeftLoraLoaderMixinTests):
         pipe.unet.add_adapter(config1, adapter_name="adapter-1")
         pipe = pipe.to(torch_device)

-        self.assertTrue(
-            check_if_lora_correctly_set(pipe.unet),
-            "Lora not correctly set in unet",
-        )
+        assert check_if_lora_correctly_set(pipe.unet), "Lora not correctly set in unet"

         # sanity check that the adapters don't target the same layers, otherwise the test passes even without the fix
         modules_adapter_0 = {n for n, _ in pipe.unet.named_modules() if n.endswith(".adapter-0")}
         modules_adapter_1 = {n for n, _ in pipe.unet.named_modules() if n.endswith(".adapter-1")}
-        self.assertNotEqual(modules_adapter_0, modules_adapter_1)
-        self.assertTrue(modules_adapter_0 - modules_adapter_1)
-        self.assertTrue(modules_adapter_1 - modules_adapter_0)
+        assert modules_adapter_0 != modules_adapter_1
+        assert modules_adapter_0 - modules_adapter_1
+        assert modules_adapter_1 - modules_adapter_0

         # setting both separately works
         pipe.set_lora_device(["adapter-0"], "cpu")
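
The set-difference assertions translate directly because a non-empty set is truthy: assert modules_adapter_0 - modules_adapter_1 passes exactly when adapter-0 targets at least one layer that adapter-1 does not, which is what the assertTrue checked before. A tiny illustration with made-up module names:

a = {"down.lora.adapter-0", "mid.lora.adapter-0"}
b = {"mid.lora.adapter-1", "up.lora.adapter-1"}
assert a != b  # replaces assertNotEqual(a, b)
assert a - b   # non-empty difference is truthy, replaces assertTrue(a - b)
assert b - a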
@@ -243,32 +217,30 @@ class TestStableDiffusionLoRA(PeftLoraLoaderMixinTests):

         for name, module in pipe.unet.named_modules():
             if "adapter-0" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
-                self.assertTrue(module.weight.device == torch.device("cpu"))
+                assert module.weight.device == torch.device("cpu")
             elif "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
-                self.assertTrue(module.weight.device == torch.device("cpu"))
+                assert module.weight.device == torch.device("cpu")

         # setting both at once also works
         pipe.set_lora_device(["adapter-0", "adapter-1"], torch_device)

         for name, module in pipe.unet.named_modules():
             if "adapter-0" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
-                self.assertTrue(module.weight.device != torch.device("cpu"))
+                assert module.weight.device != torch.device("cpu")
             elif "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
-                self.assertTrue(module.weight.device != torch.device("cpu"))
+                assert module.weight.device != torch.device("cpu")


 @slow
 @nightly
 @require_torch_accelerator
 @require_peft_backend
-class LoraIntegrationTests(unittest.TestCase):
-    def setUp(self):
-        super().setUp()
+class TestSDLoraIntegration:
+    @pytest.fixture(autouse=True)
+    def _gc_and_cache_cleanup(self, torch_device):
         gc.collect()
         backend_empty_cache(torch_device)

-    def tearDown(self):
-        super().tearDown()
+        yield
         gc.collect()
         backend_empty_cache(torch_device)
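
The _gc_and_cache_cleanup fixture above is autouse, so it wraps every test in the class: the code before the yield is the old setUp body and the code after it is the old tearDown body. A minimal self-contained sketch of the shape (gc only; the real fixture also empties the accelerator cache through the torch_device fixture from the shared test utilities):

import gc
import pytest

class TestWithCleanup:
    @pytest.fixture(autouse=True)
    def _cleanup(self):
        gc.collect()  # runs before each test, like setUp
        yield         # the test executes here
        gc.collect()  # runs after each test, like tearDown

    def test_something(self):
        assert True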
@@ -280,10 +252,7 @@ class LoraIntegrationTests(unittest.TestCase):
         pipe.load_lora_weights(lora_id)
         pipe = pipe.to(torch_device)

-        self.assertTrue(
-            check_if_lora_correctly_set(pipe.text_encoder),
-            "Lora not correctly set in text encoder",
-        )
+        assert check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder"

         prompt = "a red sks dog"
@@ -312,10 +281,7 @@ class LoraIntegrationTests(unittest.TestCase):
         pipe.load_lora_weights(lora_id)
         pipe = pipe.to(torch_device)

-        self.assertTrue(
-            check_if_lora_correctly_set(pipe.text_encoder),
-            "Lora not correctly set in text encoder",
-        )
+        assert check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder"

         prompt = "a red sks dog"
@@ -587,8 +553,8 @@ class LoraIntegrationTests(unittest.TestCase):
         ).images
         unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten()

-        self.assertFalse(np.allclose(initial_images, lora_images))
-        self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3))
+        assert not np.allclose(initial_images, lora_images)
+        assert np.allclose(initial_images, unloaded_lora_images, atol=1e-3)

         release_memory(pipe)
@@ -625,8 +591,8 @@ class LoraIntegrationTests(unittest.TestCase):
         ).images
         unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten()

-        self.assertFalse(np.allclose(initial_images, lora_images))
-        self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3))
+        assert not np.allclose(initial_images, lora_images)
+        assert np.allclose(initial_images, unloaded_lora_images, atol=1e-3)

         # make sure we can load a LoRA again after unloading and they don't have
         # any undesired effects.
@@ -637,7 +603,7 @@ class LoraIntegrationTests(unittest.TestCase):
         ).images
         lora_images_again = lora_images_again[0, -3:, -3:, -1].flatten()

-        self.assertTrue(np.allclose(lora_images, lora_images_again, atol=1e-3))
+        assert np.allclose(lora_images, lora_images_again, atol=1e-3)
         release_memory(pipe)

     def test_not_empty_state_dict(self):
@@ -651,7 +617,7 @@ class LoraIntegrationTests(unittest.TestCase):
         lcm_lora = load_file(cached_file)

         pipe.load_lora_weights(lcm_lora, adapter_name="lcm")
-        self.assertTrue(lcm_lora != {})
+        assert lcm_lora != {}
         release_memory(pipe)

     def test_load_unload_load_state_dict(self):
@@ -14,7 +14,6 @@
 # limitations under the License.
 import gc
 import sys
-import unittest

 import numpy as np
 import pytest
@@ -143,17 +142,15 @@ class TestSD3LoRA(PeftLoraLoaderMixinTests):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-class SD3LoraIntegrationTests(unittest.TestCase):
+class TestSD3LoraIntegration:
     pipeline_class = StableDiffusion3Img2ImgPipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"

-    def setUp(self):
-        super().setUp()
+    @pytest.fixture(autouse=True)
+    def _gc_and_cache_cleanup(self, torch_device):
         gc.collect()
         backend_empty_cache(torch_device)

-    def tearDown(self):
-        super().tearDown()
+        yield
         gc.collect()
         backend_empty_cache(torch_device)
@@ -17,9 +17,9 @@ import gc
 import importlib
 import sys
 import time
-import unittest

 import numpy as np
+import pytest
 import torch
 from packaging import version
 from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
@@ -104,16 +104,6 @@ class TestStableDiffusionXLLoRA(PeftLoraLoaderMixinTests):
     def output_shape(self):
         return (1, 64, 64, 3)

-    def setUp(self):
-        super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
-
-    def tearDown(self):
-        super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
-
     @is_flaky
     def test_multiple_wrong_adapter_name_raises_error(self):
         super().test_multiple_wrong_adapter_name_raises_error()
@@ -157,14 +147,12 @@ class TestStableDiffusionXLLoRA(PeftLoraLoaderMixinTests):
 @nightly
 @require_torch_accelerator
 @require_peft_backend
-class LoraSDXLIntegrationTests(unittest.TestCase):
-    def setUp(self):
-        super().setUp()
+class TestLoraSDXLIntegration:
+    @pytest.fixture(autouse=True)
+    def _gc_and_cache_cleanup(self, torch_device):
         gc.collect()
         backend_empty_cache(torch_device)

-    def tearDown(self):
-        super().tearDown()
+        yield
         gc.collect()
         backend_empty_cache(torch_device)
@@ -383,7 +371,7 @@ class LoraSDXLIntegrationTests(unittest.TestCase):
         end_time = time.time()
         elapsed_time_fusion = end_time - start_time

-        self.assertTrue(elapsed_time_fusion < elapsed_time_non_fusion)
+        assert elapsed_time_fusion < elapsed_time_non_fusion

         release_memory(pipe)
@@ -439,14 +427,14 @@ class LoraSDXLIntegrationTests(unittest.TestCase):

         for key, value in text_encoder_1_sd.items():
             key = remap_key(key, fused_te_state_dict)
-            self.assertTrue(torch.allclose(fused_te_state_dict[key], value))
+            assert torch.allclose(fused_te_state_dict[key], value)

         for key, value in text_encoder_2_sd.items():
             key = remap_key(key, fused_te_2_state_dict)
-            self.assertTrue(torch.allclose(fused_te_2_state_dict[key], value))
+            assert torch.allclose(fused_te_2_state_dict[key], value)

         for key, value in unet_state_dict.items():
-            self.assertTrue(torch.allclose(unet_state_dict[key], value))
+            assert torch.allclose(unet_state_dict[key], value)

         pipe.fuse_lora()
         pipe.unload_lora_weights()
@@ -589,7 +577,7 @@ class LoraSDXLIntegrationTests(unittest.TestCase):
         pipe.load_lora_weights(lora_id, weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
         pipe = pipe.to(torch_device)

-        self.assertTrue(check_if_lora_correctly_set(pipe.unet), "Lora not correctly set in Unet")
+        assert check_if_lora_correctly_set(pipe.unet), "Lora not correctly set in Unet"

         prompt = "toy_face of a hacker with a hoodie"