From 06ee4db3e7a5342871404ae445cf71665bc6a580 Mon Sep 17 00:00:00 2001
From: Sayak Paul
Date: Wed, 3 Jul 2024 13:11:00 +0530
Subject: [PATCH] [Chore] add dummy lora attention processors to prevent
 failures in other libs (#8777)

add dummy lora attention processors to prevent failures in other libs
---
 src/diffusers/models/attention_processor.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py
index 8784dcda4b..9d495695e3 100644
--- a/src/diffusers/models/attention_processor.py
+++ b/src/diffusers/models/attention_processor.py
@@ -2775,6 +2775,26 @@ class PAGCFGIdentitySelfAttnProcessor2_0:
         return hidden_states
 
 
+class LoRAAttnProcessor:
+    def __init__(self):
+        pass
+
+
+class LoRAAttnProcessor2_0:
+    def __init__(self):
+        pass
+
+
+class LoRAXFormersAttnProcessor:
+    def __init__(self):
+        pass
+
+
+class LoRAAttnAddedKVProcessor:
+    def __init__(self):
+        pass
+
+
 ADDED_KV_ATTENTION_PROCESSORS = (
     AttnAddedKVProcessor,
     SlicedAttnAddedKVProcessor,
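
Why empty classes prevent failures in other libs: downstream code commonly
imports these LoRA processor names from diffusers.models.attention_processor
at module load time, so removing the real classes turned every such import
into an ImportError before any code ran. Keeping no-op placeholders makes the
names resolvable again. Below is a minimal sketch of the kind of downstream
usage this protects; the import path and class names come from the patch
above, while the is_lora_processor helper is a hypothetical illustration,
not code from diffusers or any specific dependent library.

    # Hypothetical downstream module: the import alone is what used to fail.
    # Only the names need to exist for this module to load; no behavior from
    # the dummy classes is exercised.
    from diffusers.models.attention_processor import (
        LoRAAttnProcessor,
        LoRAAttnProcessor2_0,
        LoRAXFormersAttnProcessor,
        LoRAAttnAddedKVProcessor,
    )

    def is_lora_processor(proc) -> bool:
        # isinstance checks like this appear in downstream libraries; an empty
        # class is enough for the check to run (it simply returns False, since
        # nothing instantiates the dummies anymore).
        return isinstance(
            proc,
            (
                LoRAAttnProcessor,
                LoRAAttnProcessor2_0,
                LoRAXFormersAttnProcessor,
                LoRAAttnAddedKVProcessor,
            ),
        )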