From 1cd09cccf32b5e3e04ae8b57922a469073fb4d6f Mon Sep 17 00:00:00 2001
From: thomasw21 <24695242+thomasw21@users.noreply.github.com>
Date: Tue, 22 Nov 2022 11:55:03 +0100
Subject: [PATCH] Making hidden_state contiguous before applying multiple
 linear layers

---
 src/diffusers/models/attention.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py
index ad64e30d1f..6c75baab7f 100644
--- a/src/diffusers/models/attention.py
+++ b/src/diffusers/models/attention.py
@@ -277,7 +277,7 @@ class AttentionBlock(nn.Module):
         # norm
         hidden_states = self.group_norm(hidden_states)
 
-        hidden_states = hidden_states.view(batch, channel, height * width).transpose(1, 2)
+        hidden_states = hidden_states.view(batch, channel, height * width).transpose(1, 2).contiguous()
 
         # proj to q, k, v
         query_proj = self.query(hidden_states)
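
Note (not part of the patch): the sketch below is a minimal, self-contained illustration of the pattern this change targets; the tensor sizes and module names are hypothetical, not the diffusers AttentionBlock itself. After view(...).transpose(1, 2) the tensor is a non-contiguous view, so each of the three linear projections may otherwise have to handle (or internally copy) the strided layout on its own; calling .contiguous() once materializes the transposed layout up front before the q/k/v projections.

import torch
from torch import nn

# Illustrative shapes, chosen only for this sketch.
batch, channel, height, width = 2, 64, 32, 32
hidden_states = torch.randn(batch, channel, height, width)

group_norm = nn.GroupNorm(num_groups=32, num_channels=channel)
query = nn.Linear(channel, channel)
key = nn.Linear(channel, channel)
value = nn.Linear(channel, channel)

hidden_states = group_norm(hidden_states)

# (B, C, H, W) -> (B, H*W, C); transpose() returns a non-contiguous view.
hidden_states = hidden_states.view(batch, channel, height * width).transpose(1, 2)
assert not hidden_states.is_contiguous()

# Materialize the transposed layout once, instead of letting each of the
# three projections deal with the strided tensor separately.
hidden_states = hidden_states.contiguous()

query_proj = query(hidden_states)   # (B, H*W, C)
key_proj = key(hidden_states)
value_proj = value(hidden_states)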