From b4226bd6a742640d98bf0ce17e984cdc92f4cdf6 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Fri, 8 Mar 2024 18:53:07 +0530 Subject: [PATCH] [Tests] fix config checking tests (#7247) * debig * cast tuples to lists. * debug * handle upcast attention * handle downblock types for vae. * remove print. * address Dhruv's comments. * fix: upblock types. * upcast attention * debug * debug * debug * better guarding. * style --- src/diffusers/loaders/single_file_utils.py | 12 ++++++------ .../stable_diffusion_xl/test_stable_diffusion_xl.py | 2 ++ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/diffusers/loaders/single_file_utils.py b/src/diffusers/loaders/single_file_utils.py index 085c3c12cd..e844a1cafa 100644 --- a/src/diffusers/loaders/single_file_utils.py +++ b/src/diffusers/loaders/single_file_utils.py @@ -462,8 +462,8 @@ def create_unet_diffusers_config(original_config, image_size: int): config = { "sample_size": image_size // vae_scale_factor, "in_channels": unet_params["in_channels"], - "down_block_types": tuple(down_block_types), - "block_out_channels": tuple(block_out_channels), + "down_block_types": down_block_types, + "block_out_channels": block_out_channels, "layers_per_block": unet_params["num_res_blocks"], "cross_attention_dim": context_dim, "attention_head_dim": head_dim, @@ -482,7 +482,7 @@ def create_unet_diffusers_config(original_config, image_size: int): config["num_class_embeds"] = unet_params["num_classes"] config["out_channels"] = unet_params["out_channels"] - config["up_block_types"] = tuple(up_block_types) + config["up_block_types"] = up_block_types return config @@ -530,9 +530,9 @@ def create_vae_diffusers_config(original_config, image_size, scaling_factor=None "sample_size": image_size, "in_channels": vae_params["in_channels"], "out_channels": vae_params["out_ch"], - "down_block_types": tuple(down_block_types), - "up_block_types": tuple(up_block_types), - "block_out_channels": tuple(block_out_channels), + "down_block_types": down_block_types, + "up_block_types": up_block_types, + "block_out_channels": block_out_channels, "latent_channels": vae_params["z_channels"], "layers_per_block": vae_params["num_res_blocks"], "scaling_factor": scaling_factor, diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py index 1b83e23513..a9acebb5c7 100644 --- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py +++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py @@ -1088,6 +1088,8 @@ class StableDiffusionXLPipelineIntegrationTests(unittest.TestCase): for param_name, param_value in single_file_pipe.unet.config.items(): if param_name in PARAMS_TO_IGNORE: continue + if param_name == "upcast_attention" and pipe.unet.config[param_name] is None: + pipe.unet.config[param_name] = False assert ( pipe.unet.config[param_name] == param_value ), f"{param_name} is differs between single file loading and pretrained loading"