From e29dc972156c8e027dc779f08f2f786a470ab885 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Tue, 20 Dec 2022 01:38:45 +0100
Subject: [PATCH] make style

---
 src/diffusers/models/attention.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py
index 60f0c8bf30..6f81373376 100644
--- a/src/diffusers/models/attention.py
+++ b/src/diffusers/models/attention.py
@@ -297,8 +297,8 @@ class AttentionBlock(nn.Module):
                 )
             elif not torch.cuda.is_available():
                 raise ValueError(
-                    "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is only"
-                    " available for GPU "
+                    "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
+                    " only available for GPU "
                 )
             else:
                 try:
@@ -461,8 +461,8 @@ class BasicTransformerBlock(nn.Module):
                 )
             elif not torch.cuda.is_available():
                 raise ValueError(
-                    "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is only"
-                    " available for GPU "
+                    "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
+                    " only available for GPU "
                 )
             else:
                 try:
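
Note: both hunks only re-wrap the ValueError message across two string literals to satisfy the line-length style check; the guard logic itself is unchanged. For context, the check being reformatted follows the pattern sketched below. This is a minimal illustration, not the actual diffusers implementation: the helper name check_xformers_usable and the dummy-tensor probe are assumptions for this sketch.

# Minimal sketch of the guard whose error message the patch re-wraps.
# The helper name `check_xformers_usable` is hypothetical, and the probe
# call on dummy tensors is an assumption, not code from this patch.
import importlib.util

import torch


def check_xformers_usable() -> None:
    if importlib.util.find_spec("xformers") is None:
        # Corresponds to the first branch elided from the hunk context:
        # xformers is not installed at all.
        raise ModuleNotFoundError("xformers is not installed")
    elif not torch.cuda.is_available():
        # The branch the patch touches: the op requires a CUDA device.
        raise ValueError(
            "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
            " only available for GPU "
        )
    else:
        import xformers.ops

        # Run the op once on dummy tensors so a broken or incompatible
        # install fails loudly here rather than later at inference time.
        q = torch.randn((1, 2, 40), device="cuda")
        _ = xformers.ops.memory_efficient_attention(q, q, q)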