From 6cf0be5d3d935b98d5a0948cd269e6c59bb7cf0a Mon Sep 17 00:00:00 2001
From: Sayak Paul
Date: Wed, 12 Jun 2024 23:25:18 +0100
Subject: [PATCH] fix warning log for Transformer SD3 (#8496)

fix warning log
---
 src/diffusers/models/transformers/transformer_sd3.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/diffusers/models/transformers/transformer_sd3.py b/src/diffusers/models/transformers/transformer_sd3.py
index b6efae553a..4b159511e2 100644
--- a/src/diffusers/models/transformers/transformer_sd3.py
+++ b/src/diffusers/models/transformers/transformer_sd3.py
@@ -282,9 +282,10 @@ class SD3Transformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOrigi
             # weight the lora layers by setting `lora_scale` for each PEFT layer
             scale_lora_layers(self, lora_scale)
         else:
-            logger.warning(
-                "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
-            )
+            if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
+                logger.warning(
+                    "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
+                )
 
         height, width = hidden_states.shape[-2:]
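
For context, here is a minimal standalone sketch of the guard this patch introduces. The names `USE_PEFT_BACKEND`, `logger`, and `joint_attention_kwargs` mirror the diffusers code above, but the toy `forward` wrapper below is hypothetical and not the library's API. Before the patch, the warning fired on every forward pass whenever the PEFT backend was unavailable, even if no `scale` was passed; after the patch, it fires only when a `scale` value is actually supplied and therefore cannot take effect.

    import logging

    logger = logging.getLogger(__name__)
    USE_PEFT_BACKEND = False  # assume PEFT is not installed for this sketch

    def forward(joint_attention_kwargs=None):
        if USE_PEFT_BACKEND:
            pass  # scale_lora_layers(...) would run here in the real model
        else:
            # Patched behaviour: warn only when a `scale` key is actually present.
            if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
                logger.warning(
                    "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
                )

    forward()                                       # no warning (previously warned on every call)
    forward(joint_attention_kwargs={"scale": 0.5})  # warning, as intended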