From bfd6af8141ccc8fc19f45da38f349de5cd39fcdd Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 22 Jul 2025 15:41:32 +0300 Subject: [PATCH] Update nodes_model_loading.py --- nodes_model_loading.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nodes_model_loading.py b/nodes_model_loading.py index 05fd37e..c86a9e8 100644 --- a/nodes_model_loading.py +++ b/nodes_model_loading.py @@ -801,8 +801,8 @@ class WanVideoModelLoader: quantization = "fp8_e5m2_scaled" break - if "scaled_fp8" in sd and quantization != "fp8_e4m3fn_scaled": - raise ValueError("The model is a scaled fp8 model, please set quantization to 'fp8_e4m3fn_scaled'") + if "scaled_fp8" in sd and "scaled" not in quantization: + raise ValueError("The model is a scaled fp8 model, please select a 'scaled' quantization type (e.g. 'fp8_e4m3fn_scaled' or 'fp8_e5m2_scaled')") if merge_loras and "scaled" in quantization: raise ValueError("scaled models currently do not support merging LoRAs, please disable merging or use a non-scaled model")