diff --git a/scripts/convert_ltx2_to_diffusers.py b/scripts/convert_ltx2_to_diffusers.py
index d1384c1dca..eb311c3bc0 100644
--- a/scripts/convert_ltx2_to_diffusers.py
+++ b/scripts/convert_ltx2_to_diffusers.py
@@ -1,4 +1,5 @@
 import argparse
+import math
 import os
 from contextlib import nullcontext
 from typing import Any, Dict, Optional, Tuple
@@ -742,7 +743,8 @@ def main(args):
 
     if args.full_pipeline:
         scheduler = FlowMatchEulerDiscreteScheduler(
-            use_dynamic_shifting=True,
+            use_dynamic_shifting=False,
+            shift=math.exp(2.05),  # Equivalent to dynamic shift if always using max_image_seq_len
             base_shift=0.95,
             max_shift=2.05,
             base_image_seq_len=1024,
diff --git a/scripts/ltx2_test_full_pipeline.py b/scripts/ltx2_test_full_pipeline.py
index 14b02a490f..37a649d5ea 100644
--- a/scripts/ltx2_test_full_pipeline.py
+++ b/scripts/ltx2_test_full_pipeline.py
@@ -1,4 +1,5 @@
 import argparse
+import math
 import os
 from fractions import Fraction
 from typing import Optional
@@ -6,7 +7,7 @@ from typing import Optional
 import av  # Needs to be installed separately (`pip install av`)
 import torch
 
-from diffusers import LTX2Pipeline
+from diffusers import LTX2Pipeline, FlowMatchEulerDiscreteScheduler
 
 
 # Video export functions copied from original LTX 2.0 code
@@ -150,6 +151,7 @@ def parse_args():
     parser.add_argument("--frame_rate", type=float, default=25.0)
     parser.add_argument("--guidance_scale", type=float, default=3.0)
     parser.add_argument("--seed", type=int, default=42)
+    parser.add_argument("--apply_scheduler_fix", action="store_true")
     parser.add_argument("--device", type=str, default="cuda:0")
     parser.add_argument("--dtype", type=str, default="bf16")
 
@@ -179,6 +181,15 @@ def main(args):
         revision=args.revision,
         torch_dtype=args.dtype,
     )
+    if args.apply_scheduler_fix:
+        max_shift = pipeline.scheduler.config.max_shift
+        time_shift_type = pipeline.scheduler.config.time_shift_type
+        fixed_scheduler = FlowMatchEulerDiscreteScheduler.from_config(
+            pipeline.scheduler.config,
+            use_dynamic_shifting=False,
+            shift=math.exp(max_shift) if time_shift_type == "exponential" else max_shift,
+        )
+        pipeline.scheduler = fixed_scheduler
     pipeline.to(device=args.device)
     if args.cpu_offload:
         pipeline.enable_model_cpu_offload()
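
The `shift=math.exp(2.05)` value can be sanity-checked: with `time_shift_type="exponential"`, dynamic shifting maps a sigma `t` to `exp(mu) / (exp(mu) + (1/t - 1))`, while the static path applies `shift * t / (1 + (shift - 1) * t)`; the two agree when `shift = exp(mu)`, and `mu` equals `max_shift` whenever the sequence length is `max_image_seq_len`. A minimal sketch of that equivalence (not part of the patch, and assuming those two formulas match the scheduler's behavior):

```python
# Sketch only: check numerically that a fixed shift of exp(max_shift)
# reproduces the exponential dynamic-shift curve that would be used
# at image_seq_len == max_image_seq_len (i.e. mu == max_shift).
import math

max_shift = 2.05
static_shift = math.exp(max_shift)

for t in (0.01, 0.1, 0.25, 0.5, 0.75, 0.99):
    dynamic = math.exp(max_shift) / (math.exp(max_shift) + (1 / t - 1))  # exponential time shift with mu = max_shift
    static = static_shift * t / (1 + (static_shift - 1) * t)             # static shift applied to the same sigma
    assert math.isclose(dynamic, static, rel_tol=1e-12)

print("shift =", static_shift)  # ~7.77, the value hard-coded as math.exp(2.05)
```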