1
0
mirror of https://github.com/huggingface/diffusers.git synced 2026-01-27 17:22:53 +03:00

Improve CPU offload support

This commit is contained in:
Daniel Gu
2025-12-23 10:56:32 +01:00
parent 90edc6abc9
commit 1484c43183
2 changed files with 4 additions and 1 deletion

View File

@@ -153,6 +153,7 @@ def parse_args():
parser.add_argument("--device", type=str, default="cuda:0")
parser.add_argument("--dtype", type=str, default="bf16")
parser.add_argument("--cpu_offload", action="store_true")
parser.add_argument(
"--output_dir",
@@ -179,6 +180,8 @@ def main(args):
torch_dtype=args.dtype,
)
pipeline.to(device=args.device)
if args.cpu_offload:
pipeline.enable_model_cpu_offload()
video, audio = pipeline(
prompt=args.prompt,

View File

@@ -194,7 +194,7 @@ class LTX2Pipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderMix
[T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
"""
model_cpu_offload_seq = "text_encoder->transformer->vae"
model_cpu_offload_seq = "text_encoder->transformer->vae->audio_vae->vocoder"
_optional_components = []
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]