diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
index c7619e850b..6c4312a3c9 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
@@ -1215,7 +1215,7 @@ def main(args):
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
                     "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
@@ -1366,14 +1366,14 @@ def main(args):
     # Optimizer creation
     if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
-        logger.warn(
+        logger.warning(
             f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
             "Defaulting to adamW"
         )
         args.optimizer = "adamw"
     if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
-        logger.warn(
+        logger.warning(
             f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
             f"set to {args.optimizer.lower()}"
         )
@@ -1407,11 +1407,11 @@ def main(args):
         optimizer_class = prodigyopt.Prodigy
         if args.learning_rate <= 0.1:
-            logger.warn(
+            logger.warning(
                 "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
             )
         if args.train_text_encoder and args.text_encoder_lr:
-            logger.warn(
+            logger.warning(
                 f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:"
                 f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
                 f"When using prodigy only learning_rate is used as the initial learning rate."
diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
index f49da70938..219f2b6910 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
@@ -1317,7 +1317,7 @@ def main(args):
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
                     "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
@@ -1522,14 +1522,14 @@ def main(args):
     # Optimizer creation
     if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
-        logger.warn(
+        logger.warning(
             f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
             "Defaulting to adamW"
         )
         args.optimizer = "adamw"
     if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
-        logger.warn(
+        logger.warning(
             f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
             f"set to {args.optimizer.lower()}"
         )
@@ -1563,11 +1563,11 @@ def main(args):
         optimizer_class = prodigyopt.Prodigy
         if args.learning_rate <= 0.1:
-            logger.warn(
+            logger.warning(
                 "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
             )
         if args.train_text_encoder and args.text_encoder_lr:
-            logger.warn(
+            logger.warning(
                 f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:"
                 f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
                 f"When using prodigy only learning_rate is used as the initial learning rate."
diff --git a/examples/community/pipeline_stable_diffusion_xl_instantid.py b/examples/community/pipeline_stable_diffusion_xl_instantid.py
index c777403c46..bf5fd8b0b7 100644
--- a/examples/community/pipeline_stable_diffusion_xl_instantid.py
+++ b/examples/community/pipeline_stable_diffusion_xl_instantid.py
@@ -452,7 +452,7 @@ class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
                 xformers_version = version.parse(xformers.__version__)
                 if xformers_version == version.parse("0.0.16"):
-                    logger.warn(
+                    logger.warning(
                         "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                     )
                 self.enable_xformers_memory_efficient_attention()
diff --git a/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py b/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py
index 172aae98ac..ca37338c4f 100644
--- a/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py
+++ b/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py
@@ -308,7 +308,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step):
             tracker.log({"validation": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     gc.collect()
@@ -1068,7 +1068,7 @@ def main(args):
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
             unet.enable_xformers_memory_efficient_attention()
diff --git a/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py b/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
index 815584ed52..55741ceda0 100644
--- a/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
+++ b/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
@@ -180,7 +180,7 @@ def log_validation(vae, args, accelerator, weight_dtype, step, unet=None, is_fin
             logger_name = "test" if is_final_validation else "validation"
             tracker.log({logger_name: formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     gc.collect()
@@ -928,7 +928,7 @@ def main(args):
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py b/examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py index c76d9713b4..0eefa68ccb 100644 --- a/examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py +++ b/examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py @@ -325,7 +325,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step): tracker.log({"validation": formatted_images}) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() @@ -1083,7 +1083,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/consistency_distillation/train_lcm_distill_sd_wds.py b/examples/consistency_distillation/train_lcm_distill_sd_wds.py index 35a3426b33..5a82ae7d7a 100644 --- a/examples/consistency_distillation/train_lcm_distill_sd_wds.py +++ b/examples/consistency_distillation/train_lcm_distill_sd_wds.py @@ -285,7 +285,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step, name="targe tracker.log({f"validation/{name}": formatted_images}) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() @@ -1023,7 +1023,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/consistency_distillation/train_lcm_distill_sdxl_wds.py b/examples/consistency_distillation/train_lcm_distill_sdxl_wds.py index 9507107b49..15cb99f325 100644 --- a/examples/consistency_distillation/train_lcm_distill_sdxl_wds.py +++ b/examples/consistency_distillation/train_lcm_distill_sdxl_wds.py @@ -303,7 +303,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step, name="targe tracker.log({f"validation/{name}": formatted_images}) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() @@ -1083,7 +1083,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/controlnet/train_controlnet.py b/examples/controlnet/train_controlnet.py index 72aae40c7d..bad54a7153 100644 --- a/examples/controlnet/train_controlnet.py +++ b/examples/controlnet/train_controlnet.py @@ -178,7 +178,7 @@ def log_validation( tracker.log({tracker_key: formatted_images}) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() @@ -861,7 +861,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/controlnet/train_controlnet_flax.py b/examples/controlnet/train_controlnet_flax.py index 285227aa23..6a4e71a2e8 100644 --- a/examples/controlnet/train_controlnet_flax.py +++ b/examples/controlnet/train_controlnet_flax.py @@ -128,7 +128,7 @@ def log_validation(pipeline, pipeline_params, controlnet_params, tokenizer, args wandb.log({"validation": formatted_images}) else: - logger.warn(f"image logging not implemented for {args.report_to}") + logger.warning(f"image logging not implemented for {args.report_to}") return image_logs diff --git a/examples/controlnet/train_controlnet_sdxl.py b/examples/controlnet/train_controlnet_sdxl.py index a16ed1129b..c123d95d31 100644 --- a/examples/controlnet/train_controlnet_sdxl.py +++ b/examples/controlnet/train_controlnet_sdxl.py @@ -178,7 +178,7 @@ def log_validation(vae, unet, controlnet, args, accelerator, weight_dtype, step, tracker.log({tracker_key: formatted_images}) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() @@ -929,7 +929,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/custom_diffusion/train_custom_diffusion.py b/examples/custom_diffusion/train_custom_diffusion.py index 790454be7a..d80e3388a6 100644 --- a/examples/custom_diffusion/train_custom_diffusion.py +++ b/examples/custom_diffusion/train_custom_diffusion.py @@ -904,7 +904,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) attention_class = CustomDiffusionXFormersAttnProcessor diff --git a/examples/dreambooth/train_dreambooth.py b/examples/dreambooth/train_dreambooth.py index e61afafd52..b5a68be576 100644 --- a/examples/dreambooth/train_dreambooth.py +++ b/examples/dreambooth/train_dreambooth.py @@ -987,7 +987,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/dreambooth/train_dreambooth_lora.py b/examples/dreambooth/train_dreambooth_lora.py index f3f1f52cf4..2a905fdc03 100644 --- a/examples/dreambooth/train_dreambooth_lora.py +++ b/examples/dreambooth/train_dreambooth_lora.py @@ -895,7 +895,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/dreambooth/train_dreambooth_lora_sdxl.py b/examples/dreambooth/train_dreambooth_lora_sdxl.py index 96d7c3168d..e2d3129d15 100644 --- a/examples/dreambooth/train_dreambooth_lora_sdxl.py +++ b/examples/dreambooth/train_dreambooth_lora_sdxl.py @@ -1141,7 +1141,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, " "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) @@ -1317,14 +1317,14 @@ def main(args): # Optimizer creation if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"): - logger.warn( + logger.warning( f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]." "Defaulting to adamW" ) args.optimizer = "adamw" if args.use_8bit_adam and not args.optimizer.lower() == "adamw": - logger.warn( + logger.warning( f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was " f"set to {args.optimizer.lower()}" ) @@ -1358,11 +1358,11 @@ def main(args): optimizer_class = prodigyopt.Prodigy if args.learning_rate <= 0.1: - logger.warn( + logger.warning( "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0" ) if args.train_text_encoder and args.text_encoder_lr: - logger.warn( + logger.warning( f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:" f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. " f"When using prodigy only learning_rate is used as the initial learning rate." 
diff --git a/examples/instruct_pix2pix/train_instruct_pix2pix.py b/examples/instruct_pix2pix/train_instruct_pix2pix.py index a8b445d103..6fa7a28b49 100644 --- a/examples/instruct_pix2pix/train_instruct_pix2pix.py +++ b/examples/instruct_pix2pix/train_instruct_pix2pix.py @@ -488,7 +488,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py b/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py index 4a681e4742..36ad9b0d98 100644 --- a/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py +++ b/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py @@ -580,7 +580,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py b/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py index 14cedb2818..1d084dce0f 100644 --- a/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py +++ b/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py @@ -177,7 +177,7 @@ def log_validation(vae, image_encoder, image_processor, unet, args, accelerator, } ) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() @@ -534,7 +534,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py b/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py index 6697a45a4d..5e7879a54e 100644 --- a/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py +++ b/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py @@ -180,7 +180,7 @@ def log_validation( } ) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() diff --git a/examples/research_projects/consistency_training/train_cm_ct_unconditional.py b/examples/research_projects/consistency_training/train_cm_ct_unconditional.py index 1028dacae9..947de230b6 100644 --- a/examples/research_projects/consistency_training/train_cm_ct_unconditional.py +++ b/examples/research_projects/consistency_training/train_cm_ct_unconditional.py @@ -219,7 +219,7 @@ def log_validation(unet, scheduler, args, accelerator, weight_dtype, step, name= if args.num_classes is not None: class_labels = list(range(args.num_classes)) else: - logger.warn( + logger.warning( "The model is class-conditional but the number of classes is not set. The generated images will be" " unconditional rather than class-conditional." ) @@ -266,7 +266,7 @@ def log_validation(unet, scheduler, args, accelerator, weight_dtype, step, name= tracker.log({f"validation/{name}": formatted_images}) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() @@ -863,14 +863,14 @@ def main(args): elif args.model_config_name_or_path is None: # TODO: use default architectures from iCT paper if not args.class_conditional and (args.num_classes is not None or args.class_embed_type is not None): - logger.warn( + logger.warning( f"`--class_conditional` is set to `False` but `--num_classes` is set to {args.num_classes} and" f" `--class_embed_type` is set to {args.class_embed_type}. These values will be overridden to `None`." ) args.num_classes = None args.class_embed_type = None elif args.class_conditional and args.num_classes is None and args.class_embed_type is None: - logger.warn( + logger.warning( "`--class_conditional` is set to `True` but neither `--num_classes` nor `--class_embed_type` is set." "`class_conditional` will be overridden to `False`." ) @@ -996,7 +996,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/research_projects/controlnet/train_controlnet_webdataset.py b/examples/research_projects/controlnet/train_controlnet_webdataset.py index 9484746d6c..2b397d27d6 100644 --- a/examples/research_projects/controlnet/train_controlnet_webdataset.py +++ b/examples/research_projects/controlnet/train_controlnet_webdataset.py @@ -407,7 +407,7 @@ def log_validation(vae, unet, controlnet, args, accelerator, weight_dtype, step) tracker.log({"validation": formatted_images}) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() @@ -1057,7 +1057,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/research_projects/diffusion_dpo/train_diffusion_dpo.py b/examples/research_projects/diffusion_dpo/train_diffusion_dpo.py index c0d201671e..4bb6b89447 100644 --- a/examples/research_projects/diffusion_dpo/train_diffusion_dpo.py +++ b/examples/research_projects/diffusion_dpo/train_diffusion_dpo.py @@ -574,7 +574,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/research_projects/diffusion_dpo/train_diffusion_dpo_sdxl.py b/examples/research_projects/diffusion_dpo/train_diffusion_dpo_sdxl.py index 9ec3e99159..24d51658e3 100644 --- a/examples/research_projects/diffusion_dpo/train_diffusion_dpo_sdxl.py +++ b/examples/research_projects/diffusion_dpo/train_diffusion_dpo_sdxl.py @@ -672,7 +672,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/research_projects/instructpix2pix_lora/train_instruct_pix2pix_lora.py b/examples/research_projects/instructpix2pix_lora/train_instruct_pix2pix_lora.py index 8589caffc8..436706c851 100644 --- a/examples/research_projects/instructpix2pix_lora/train_instruct_pix2pix_lora.py +++ b/examples/research_projects/instructpix2pix_lora/train_instruct_pix2pix_lora.py @@ -516,7 +516,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py b/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py index 377497ff3c..3cfd728214 100644 --- a/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py +++ b/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py @@ -608,7 +608,7 @@ def main(): # Create the pipeline using using the trained modules and save it. if accelerator.is_main_process: if args.push_to_hub and args.only_save_embeds: - logger.warn("Enabling full model saving because --push_to_hub=True was specified.") + logger.warning("Enabling full model saving because --push_to_hub=True was specified.") save_full_model = True else: save_full_model = not args.only_save_embeds diff --git a/examples/research_projects/lora/train_text_to_image_lora.py b/examples/research_projects/lora/train_text_to_image_lora.py index 1861d7ef14..462c3bbd44 100644 --- a/examples/research_projects/lora/train_text_to_image_lora.py +++ b/examples/research_projects/lora/train_text_to_image_lora.py @@ -541,7 +541,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/research_projects/multi_token_textual_inversion/textual_inversion.py b/examples/research_projects/multi_token_textual_inversion/textual_inversion.py index 0ca75545a8..5fab1b6e9c 100644 --- a/examples/research_projects/multi_token_textual_inversion/textual_inversion.py +++ b/examples/research_projects/multi_token_textual_inversion/textual_inversion.py @@ -645,7 +645,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) unet.enable_xformers_memory_efficient_attention() @@ -901,7 +901,7 @@ def main(): accelerator.wait_for_everyone() if accelerator.is_main_process: if args.push_to_hub and args.only_save_embeds: - logger.warn("Enabling full model saving because --push_to_hub=True was specified.") + logger.warning("Enabling full model saving because --push_to_hub=True was specified.") save_full_model = True else: save_full_model = not args.only_save_embeds diff --git a/examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py b/examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py index 7e18e6f8dd..2045ef4197 100644 --- a/examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py +++ b/examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py @@ -108,7 +108,7 @@ def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight } ) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() @@ -523,7 +523,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py b/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py index d98009d6bb..5d774d591d 100644 --- a/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py +++ b/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py @@ -687,7 +687,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() @@ -916,7 +916,7 @@ def main(): accelerator.wait_for_everyone() if accelerator.is_main_process: if args.push_to_hub and not args.save_as_full_pipeline: - logger.warn("Enabling full model saving because --push_to_hub=True was specified.") + logger.warning("Enabling full model saving because --push_to_hub=True was specified.") save_full_model = True else: save_full_model = args.save_as_full_pipeline diff --git a/examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py b/examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py index 0dabf66480..8b6f7ab3a0 100644 --- a/examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py +++ b/examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py @@ -410,7 +410,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. 
See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) model.enable_xformers_memory_efficient_attention() diff --git a/examples/research_projects/realfill/train_realfill.py b/examples/research_projects/realfill/train_realfill.py index fb75fe620d..c7cc25df02 100644 --- a/examples/research_projects/realfill/train_realfill.py +++ b/examples/research_projects/realfill/train_realfill.py @@ -629,7 +629,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/t2i_adapter/train_t2i_adapter_sdxl.py b/examples/t2i_adapter/train_t2i_adapter_sdxl.py index 00190975c2..73ee64b1d5 100644 --- a/examples/t2i_adapter/train_t2i_adapter_sdxl.py +++ b/examples/t2i_adapter/train_t2i_adapter_sdxl.py @@ -167,7 +167,7 @@ def log_validation(vae, unet, adapter, args, accelerator, weight_dtype, step): tracker.log({"validation": formatted_images}) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() @@ -932,7 +932,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/text_to_image/train_text_to_image.py b/examples/text_to_image/train_text_to_image.py index 90c6761c5d..a933802e28 100644 --- a/examples/text_to_image/train_text_to_image.py +++ b/examples/text_to_image/train_text_to_image.py @@ -183,7 +183,7 @@ def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight } ) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() @@ -608,7 +608,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/text_to_image/train_text_to_image_lora.py b/examples/text_to_image/train_text_to_image_lora.py index a6bf8f15ce..ba1b3b840e 100644 --- a/examples/text_to_image/train_text_to_image_lora.py +++ b/examples/text_to_image/train_text_to_image_lora.py @@ -497,7 +497,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. 
See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/text_to_image/train_text_to_image_lora_sdxl.py b/examples/text_to_image/train_text_to_image_lora_sdxl.py index 55b4017201..c4d7a8b045 100644 --- a/examples/text_to_image/train_text_to_image_lora_sdxl.py +++ b/examples/text_to_image/train_text_to_image_lora_sdxl.py @@ -616,7 +616,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/text_to_image/train_text_to_image_sdxl.py b/examples/text_to_image/train_text_to_image_sdxl.py index dc1df5f745..de2c67e2eb 100644 --- a/examples/text_to_image/train_text_to_image_sdxl.py +++ b/examples/text_to_image/train_text_to_image_sdxl.py @@ -712,7 +712,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py index e080e01a49..d91db3368f 100644 --- a/examples/textual_inversion/textual_inversion.py +++ b/examples/textual_inversion/textual_inversion.py @@ -708,7 +708,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() @@ -966,7 +966,7 @@ def main(): accelerator.wait_for_everyone() if accelerator.is_main_process: if args.push_to_hub and not args.save_as_full_pipeline: - logger.warn("Enabling full model saving because --push_to_hub=True was specified.") + logger.warning("Enabling full model saving because --push_to_hub=True was specified.") save_full_model = True else: save_full_model = args.save_as_full_pipeline diff --git a/examples/textual_inversion/textual_inversion_sdxl.py b/examples/textual_inversion/textual_inversion_sdxl.py index df666abce5..7c1a96e2cc 100644 --- a/examples/textual_inversion/textual_inversion_sdxl.py +++ b/examples/textual_inversion/textual_inversion_sdxl.py @@ -711,7 +711,7 @@ def main(): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) unet.enable_xformers_memory_efficient_attention() @@ -1022,7 +1022,7 @@ def main(): ) if args.push_to_hub and not args.save_as_full_pipeline: - logger.warn("Enabling full model saving because --push_to_hub=True was specified.") + logger.warning("Enabling full model saving because --push_to_hub=True was specified.") save_full_model = True else: save_full_model = args.save_as_full_pipeline diff --git a/examples/unconditional_image_generation/train_unconditional.py b/examples/unconditional_image_generation/train_unconditional.py index 3c6041b169..79572414c7 100644 --- a/examples/unconditional_image_generation/train_unconditional.py +++ b/examples/unconditional_image_generation/train_unconditional.py @@ -408,7 +408,7 @@ def main(args): xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): - logger.warn( + logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) model.enable_xformers_memory_efficient_attention() diff --git a/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py b/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py index 445760019f..81478b48bf 100644 --- a/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py +++ b/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py @@ -184,7 +184,7 @@ def log_validation(text_encoder, tokenizer, prior, args, accelerator, weight_dty } ) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() diff --git a/examples/wuerstchen/text_to_image/train_text_to_image_prior.py b/examples/wuerstchen/text_to_image/train_text_to_image_prior.py index ad880878b0..f5a7309e1f 100644 --- a/examples/wuerstchen/text_to_image/train_text_to_image_prior.py +++ b/examples/wuerstchen/text_to_image/train_text_to_image_prior.py @@ -182,7 +182,7 @@ def log_validation(text_encoder, tokenizer, prior, args, accelerator, weight_dty } ) else: - logger.warn(f"image logging not implemented for {tracker.name}") + logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() diff --git a/src/diffusers/loaders/lora.py b/src/diffusers/loaders/lora.py index 964087e0e0..c6077f3a8e 100644 --- a/src/diffusers/loaders/lora.py +++ b/src/diffusers/loaders/lora.py @@ -430,7 +430,7 @@ class LoraLoaderMixin: # contain the module names of the `unet` as its keys WITHOUT any prefix. if not USE_PEFT_BACKEND: warn_message = "You have saved the LoRA weights using the old format. To convert the old LoRA weights to the new format, you can first load them in a dictionary and then create a new dictionary like the following: `new_state_dict = {f'unet.{module_name}': params for module_name, params in old_state_dict.items()}`." - logger.warn(warn_message) + logger.warning(warn_message) if len(state_dict.keys()) > 0: if adapter_name in getattr(unet, "peft_config", {}): @@ -882,7 +882,7 @@ class LoraLoaderMixin: if fuse_unet or fuse_text_encoder: self.num_fused_loras += 1 if self.num_fused_loras > 1: - logger.warn( + logger.warning( "The current API is supported for operating with a single LoRA file. 
You are trying to load and fuse more than one LoRA which is not well-supported.", ) diff --git a/src/diffusers/loaders/single_file_utils.py b/src/diffusers/loaders/single_file_utils.py index e844a1cafa..32f47bb23f 100644 --- a/src/diffusers/loaders/single_file_utils.py +++ b/src/diffusers/loaders/single_file_utils.py @@ -884,7 +884,7 @@ def create_diffusers_controlnet_model_from_ldm( unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: - logger.warn( + logger.warning( f"Some weights of the model checkpoint were not used when initializing {controlnet.__name__}: \n {[', '.join(unexpected_keys)]}" ) else: @@ -1060,7 +1060,7 @@ def create_text_encoder_from_ldm_clip_checkpoint(config_name, checkpoint, local_ unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: - logger.warn( + logger.warning( f"Some weights of the model checkpoint were not used when initializing {text_model.__class__.__name__}: \n {[', '.join(unexpected_keys)]}" ) else: @@ -1155,7 +1155,7 @@ def create_text_encoder_from_open_clip_checkpoint( unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: - logger.warn( + logger.warning( f"Some weights of the model checkpoint were not used when initializing {text_model.__class__.__name__}: \n {[', '.join(unexpected_keys)]}" ) @@ -1221,7 +1221,7 @@ def create_diffusers_unet_model_from_ldm( unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: - logger.warn( + logger.warning( f"Some weights of the model checkpoint were not used when initializing {unet.__name__}: \n {[', '.join(unexpected_keys)]}" ) else: @@ -1283,7 +1283,7 @@ def create_diffusers_vae_model_from_ldm( unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: - logger.warn( + logger.warning( f"Some weights of the model checkpoint were not used when initializing {vae.__name__}: \n {[', '.join(unexpected_keys)]}" ) else: diff --git a/src/diffusers/loaders/unet.py b/src/diffusers/loaders/unet.py index 9d8e2666c5..e9f2cb2ed1 100644 --- a/src/diffusers/loaders/unet.py +++ b/src/diffusers/loaders/unet.py @@ -345,7 +345,7 @@ class UNet2DConditionLoadersMixin: is_model_cpu_offload = False is_sequential_cpu_offload = False - # For PEFT backend the Unet is already offloaded at this stage as it is handled inside `lora_lora_weights_into_unet` + # For PEFT backend the Unet is already offloaded at this stage as it is handled inside `load_lora_weights_into_unet` if not USE_PEFT_BACKEND: if _pipeline is not None: for _, component in _pipeline.components.items(): @@ -384,7 +384,7 @@ class UNet2DConditionLoadersMixin: is_text_encoder_present = any(key.startswith(self.text_encoder_name) for key in state_dict.keys()) if is_text_encoder_present: warn_message = "The state_dict contains LoRA params corresponding to the text encoder which are not being used here. To use both UNet and text encoder related LoRA params, use [`pipe.load_lora_weights()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.load_lora_weights)." 
- logger.warn(warn_message) + logger.warning(warn_message) unet_keys = [k for k in state_dict.keys() if k.startswith(self.unet_name)] state_dict = {k.replace(f"{self.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys} diff --git a/src/diffusers/models/modeling_utils.py b/src/diffusers/models/modeling_utils.py index 03768e875a..73ea5fb07e 100644 --- a/src/diffusers/models/modeling_utils.py +++ b/src/diffusers/models/modeling_utils.py @@ -677,7 +677,7 @@ class ModelMixin(torch.nn.Module, PushToHubMixin): unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: - logger.warn( + logger.warning( f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}" ) @@ -705,7 +705,7 @@ class ModelMixin(torch.nn.Module, PushToHubMixin): # the weights so we don't have to do this again. if "'Attention' object has no attribute" in str(e): - logger.warn( + logger.warning( f"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}" " was saved with deprecated attention block weight names. We will load it with the deprecated attention block" " names and convert them on the fly to the new attention block format. Please re-save the model after this conversion," diff --git a/src/diffusers/models/unets/unet_2d_blocks.py b/src/diffusers/models/unets/unet_2d_blocks.py index 9ebf6982ca..a0ec2a1166 100644 --- a/src/diffusers/models/unets/unet_2d_blocks.py +++ b/src/diffusers/models/unets/unet_2d_blocks.py @@ -69,7 +69,7 @@ def get_down_block( ): # If attn head dim is not defined, we default it to the number of heads if attention_head_dim is None: - logger.warn( + logger.warning( f"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}." ) attention_head_dim = num_attention_heads @@ -354,7 +354,7 @@ def get_up_block( ) -> nn.Module: # If attn head dim is not defined, we default it to the number of heads if attention_head_dim is None: - logger.warn( + logger.warning( f"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}." ) attention_head_dim = num_attention_heads @@ -673,7 +673,7 @@ class UNetMidBlock2D(nn.Module): attentions = [] if attention_head_dim is None: - logger.warn( + logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}." ) attention_head_dim = in_channels @@ -1035,7 +1035,7 @@ class AttnDownBlock2D(nn.Module): self.downsample_type = downsample_type if attention_head_dim is None: - logger.warn( + logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." ) attention_head_dim = out_channels @@ -1480,7 +1480,7 @@ class AttnDownEncoderBlock2D(nn.Module): attentions = [] if attention_head_dim is None: - logger.warn( + logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." ) attention_head_dim = out_channels @@ -1579,7 +1579,7 @@ class AttnSkipDownBlock2D(nn.Module): self.resnets = nn.ModuleList([]) if attention_head_dim is None: - logger.warn( + logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." 
) attention_head_dim = out_channels @@ -2244,7 +2244,7 @@ class AttnUpBlock2D(nn.Module): self.upsample_type = upsample_type if attention_head_dim is None: - logger.warn( + logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." ) attention_head_dim = out_channels @@ -2719,7 +2719,7 @@ class AttnUpDecoderBlock2D(nn.Module): attentions = [] if attention_head_dim is None: - logger.warn( + logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}." ) attention_head_dim = out_channels @@ -2841,7 +2841,7 @@ class AttnSkipUpBlock2D(nn.Module): ) if attention_head_dim is None: - logger.warn( + logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}." ) attention_head_dim = out_channels diff --git a/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py b/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py index b7e9428fe4..5b6fc2b393 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +++ b/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py @@ -156,7 +156,7 @@ class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline): self.dtype = dtype if safety_checker is None: - logger.warn( + logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" diff --git a/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py b/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py index 64806d783d..7adf9e9c47 100644 --- a/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py +++ b/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py @@ -416,13 +416,13 @@ class IFPipeline(DiffusionPipeline, LoraLoaderMixin): def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): - logger.warn(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): - logger.warn(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): diff --git a/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py b/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py index 6ec4ce6f11..ccc7b1d151 100644 --- a/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +++ b/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py @@ -460,13 +460,13 @@ class IFImg2ImgPipeline(DiffusionPipeline, LoraLoaderMixin): # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): - logger.warn(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to 
False...") + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): - logger.warn(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): diff --git a/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py b/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py index d59c2b533d..b4ce5831a5 100644 --- a/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +++ b/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py @@ -175,7 +175,7 @@ class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin): ) if unet.config.in_channels != 6: - logger.warn( + logger.warning( "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`." ) @@ -209,13 +209,13 @@ class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin): # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): - logger.warn(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): - logger.warn(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): diff --git a/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py b/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py index 1dbb5e92ec..180e5309c5 100644 --- a/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +++ b/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py @@ -500,13 +500,13 @@ class IFInpaintingPipeline(DiffusionPipeline, LoraLoaderMixin): # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): - logger.warn(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): - logger.warn(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting 
`clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): diff --git a/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py b/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py index cb9200cffc..b67907c1c1 100644 --- a/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +++ b/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py @@ -177,7 +177,7 @@ class IFInpaintingSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin): ) if unet.config.in_channels != 6: - logger.warn( + logger.warning( "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`." ) @@ -211,13 +211,13 @@ class IFInpaintingSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin): # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): - logger.warn(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): - logger.warn(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): diff --git a/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py b/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py index 2b48f5887c..a293343ebe 100644 --- a/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +++ b/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py @@ -133,7 +133,7 @@ class IFSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin): ) if unet.config.in_channels != 6: - logger.warn( + logger.warning( "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`." 
) @@ -167,13 +167,13 @@ class IFSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin): # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): - logger.warn(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): - logger.warn(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py index 97c533be58..f802a37de4 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py @@ -2158,7 +2158,7 @@ class UNetMidBlockFlat(nn.Module): attentions = [] if attention_head_dim is None: - logger.warn( + logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}." ) attention_head_dim = in_channels diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py index 0555cbe754..d8d9e96e6f 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -481,7 +481,7 @@ class KandinskyInpaintPipeline(DiffusionPipeline): if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse( "0.23.0.dev0" ): - logger.warn( + logger.warning( "Please note that the expected format of `mask_image` has recently been changed. " "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved black pixels. " "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. " diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py index 4983ed7218..2fb8731f8a 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -372,7 +372,7 @@ class KandinskyV22InpaintPipeline(DiffusionPipeline): if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse( "0.23.0.dev0" ): - logger.warn( + logger.warning( "Please note that the expected format of `mask_image` has recently been changed. " "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved black pixels. " "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. 
" diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index fa706ea57d..341360d4f7 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -256,7 +256,9 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin): break if save_method_name is None: - logger.warn(f"self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved.") + logger.warning( + f"self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved." + ) # make sure that unsaveable components are not tried to be loaded afterward self.register_to_config(**{pipeline_component_name: (None, None)}) continue @@ -1202,7 +1204,7 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin): try: info = model_info(pretrained_model_name, token=token, revision=revision) except (HTTPError, OfflineModeIsEnabled, requests.ConnectionError) as e: - logger.warn(f"Couldn't connect to the Hub: {e}.\nWill try to load from local cache.") + logger.warning(f"Couldn't connect to the Hub: {e}.\nWill try to load from local cache.") local_files_only = True model_info_call_error = e # save error to reraise it if model is not cached locally @@ -1353,7 +1355,7 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin): len(safetensors_variant_filenames) > 0 and safetensors_model_filenames != safetensors_variant_filenames ): - logger.warn( + logger.warning( f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(safetensors_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}\nIf this behavior is not expected, please check your folder structure." ) else: @@ -1366,7 +1368,7 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin): bin_variant_filenames = {f for f in variant_filenames if f.endswith(".bin")} bin_model_filenames = {f for f in model_filenames if f.endswith(".bin")} if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames: - logger.warn( + logger.warning( f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(bin_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(bin_model_filenames - bin_variant_filenames)}\nIf this behavior is not expected, please check your folder structure." 
) diff --git a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py index c12bca90aa..e7213a38bc 100644 --- a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +++ b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py @@ -514,13 +514,13 @@ class PixArtAlphaPipeline(DiffusionPipeline): # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): - logger.warn(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): - logger.warn(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py index 1d73d35f68..7792bc0975 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py @@ -147,7 +147,7 @@ class FlaxStableDiffusionImg2ImgPipeline(FlaxDiffusionPipeline): self.dtype = dtype if safety_checker is None: - logger.warn( + logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py index 1333cb8257..afd8729047 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py @@ -82,7 +82,7 @@ class StableDiffusionImageVariationPipeline(DiffusionPipeline, StableDiffusionMi super().__init__() if safety_checker is None and requires_safety_checker: - logger.warn( + logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" diff --git a/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py b/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py index 9806df8452..543f81e4c3 100644 --- a/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py +++ b/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py @@ -319,13 +319,13 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin): self.sample = None if not self.config.lower_order_final and num_inference_steps % self.config.solver_order != 0: - logger.warn( + logger.warning( "Changing scheduler {self.config} to have `lower_order_final` set to True to handle uneven amount of inference steps. Please make sure to always use an even number of `num_inference steps when using `lower_order_final=False`." ) self.register_to_config(lower_order_final=True) if not self.config.lower_order_final and self.config.final_sigmas_type == "zero": - logger.warn( + logger.warning( " `last_sigmas_type='zero'` is not supported for `lower_order_final=False`. Changing scheduler {self.config} to have `lower_order_final` set to True." ) self.register_to_config(lower_order_final=True)