From 32a5d70c425dd4df7eca8b523723bedf4446adea Mon Sep 17 00:00:00 2001
From: Anton Lozhkov
Date: Mon, 19 Dec 2022 12:43:30 +0100
Subject: [PATCH] Support attn2==None for xformers (#1759)

---
 src/diffusers/models/attention.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py
index f9e43e4dd5..1d359864ba 100644
--- a/src/diffusers/models/attention.py
+++ b/src/diffusers/models/attention.py
@@ -473,7 +473,8 @@ class BasicTransformerBlock(nn.Module):
             except Exception as e:
                 raise e
             self.attn1._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
-            self.attn2._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
+            if self.attn2 is not None:
+                self.attn2._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
 
     def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, attention_mask=None):
         # 1. Self-Attention
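
For context, here is a minimal sketch of the failure mode this patch guards against: a transformer block built without cross-attention leaves attn2 set to None, so unconditionally assigning the xformers flag raises AttributeError. The CrossAttention stand-in and the has_cross_attention flag below are simplified assumptions for illustration, not the exact diffusers API.

import torch.nn as nn


class CrossAttention(nn.Module):
    # Illustrative stand-in: only the flag the patch touches is modeled.
    def __init__(self):
        super().__init__()
        self._use_memory_efficient_attention_xformers = False


class BasicTransformerBlock(nn.Module):
    # Sketch of the relevant structure: attn2 is only created when the
    # block is configured with cross-attention, and is None otherwise.
    def __init__(self, has_cross_attention: bool = True):
        super().__init__()
        self.attn1 = CrossAttention()
        self.attn2 = CrossAttention() if has_cross_attention else None

    def set_use_memory_efficient_attention_xformers(self, use_xformers: bool):
        self.attn1._use_memory_efficient_attention_xformers = use_xformers
        # The guard added by this patch: skip attn2 when it was never built.
        if self.attn2 is not None:
            self.attn2._use_memory_efficient_attention_xformers = use_xformers


# Without the None check, this call would raise:
# AttributeError: 'NoneType' object has no attribute
# '_use_memory_efficient_attention_xformers'
block = BasicTransformerBlock(has_cross_attention=False)
block.set_use_memory_efficient_attention_xformers(True)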