From 8018a6a733fa2891cc637a5eeab1ba4ff8d1be66 Mon Sep 17 00:00:00 2001 From: DN6 Date: Tue, 7 Oct 2025 17:45:42 +0530 Subject: [PATCH] update --- src/diffusers/models/modeling_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/diffusers/models/modeling_utils.py b/src/diffusers/models/modeling_utils.py index bbd8f9b225..9ba04bb556 100644 --- a/src/diffusers/models/modeling_utils.py +++ b/src/diffusers/models/modeling_utils.py @@ -1524,10 +1524,11 @@ class ModelMixin(torch.nn.Module, PushToHubMixin): attention_backend = AttentionBackendName(attention_backend) if not _AttentionBackendRegistry._is_context_parallel_available(attention_backend): + compatible_backends = sorted(_AttentionBackendRegistry._supports_context_parallel) raise ValueError( f"Context parallelism is enabled but the attention processor '{processor.__class__.__name__}' " f"is using backend '{attention_backend.value}' which does not support context parallelism. " - f"Please set a compatible attention backend: {_AttentionBackendRegistry._supports_context_parallel} using `model.set_attention_backend()` before " + f"Please set a compatible attention backend: {compatible_backends} using `model.set_attention_backend()` before " f"calling `enable_parallelism()`." )