From 511c7a4c40da90b739cf0766d6050bff42e27227 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Thu, 27 Nov 2025 12:04:29 +0530
Subject: [PATCH] up

---
 .../pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py b/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py
index 9fcc4a6e6e..df609d2f67 100644
--- a/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py
+++ b/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py
@@ -689,10 +689,7 @@ class StableDiffusionXLControlNetXSPipeline(
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory