From 1066de8c699db994ecd6beadd7d5293ffc3ead49 Mon Sep 17 00:00:00 2001
From: Linoy Tsaban <57615435+linoytsaban@users.noreply.github.com>
Date: Tue, 7 Oct 2025 18:27:15 +0300
Subject: [PATCH] [Qwen LoRA training] fix bug when offloading (#12440)

* fix bug when offload and cache_latents both enabled
---
 examples/dreambooth/train_dreambooth_lora_qwen_image.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/dreambooth/train_dreambooth_lora_qwen_image.py b/examples/dreambooth/train_dreambooth_lora_qwen_image.py
index 75eae92dfb..56de160d6f 100644
--- a/examples/dreambooth/train_dreambooth_lora_qwen_image.py
+++ b/examples/dreambooth/train_dreambooth_lora_qwen_image.py
@@ -1338,7 +1338,7 @@ def main(args):
                     batch["pixel_values"] = batch["pixel_values"].to(
                         accelerator.device, non_blocking=True, dtype=vae.dtype
                     )
-                latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
+                    latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
     if train_dataset.custom_instance_prompts:
         with offload_models(text_encoding_pipeline, device=accelerator.device, offload=args.offload):
            prompt_embeds, prompt_embeds_mask = compute_text_embeddings(
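
Background on the fix: with --offload and --cache_latents both enabled, offload_models
returns the VAE to the CPU as soon as its `with` block exits. Because the
latents_cache.append(vae.encode(...)) call sat one indentation level outside that
block, encoding likely ran against a CPU VAE while pixel_values had already been
moved to the accelerator, producing a device mismatch. Re-indenting the call into
the block keeps the VAE on-device during encoding. Below is a minimal, runnable
sketch of the pattern; TinyVAE and the toy loop are hypothetical stand-ins, and the
offload_models body here is an assumption about the helper's behavior, not the
diffusers implementation.

    # Sketch only: illustrates the offload-aware latent-caching pattern this
    # patch fixes, under the assumption that `offload_models` moves a module to
    # `device` on entry and back to the CPU on exit when `offload=True`.
    from contextlib import contextmanager

    import torch


    @contextmanager
    def offload_models(module: torch.nn.Module, device: torch.device, offload: bool):
        # Move the module onto the accelerator for the duration of the block.
        if offload:
            module.to(device)
        try:
            yield module
        finally:
            # When offloading, return the module to the CPU to free accelerator memory.
            if offload:
                module.to("cpu")


    class TinyVAE(torch.nn.Module):
        # Hypothetical stand-in; a real VAE returns a distribution via `.latent_dist`.
        def __init__(self):
            super().__init__()
            self.proj = torch.nn.Conv2d(3, 4, kernel_size=1)

        def encode(self, pixel_values: torch.Tensor) -> torch.Tensor:
            return self.proj(pixel_values)


    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vae = TinyVAE()
    latents_cache = []

    for batch in [{"pixel_values": torch.randn(1, 3, 8, 8)} for _ in range(2)]:
        with torch.no_grad():
            with offload_models(vae, device=device, offload=True):
                pixel_values = batch["pixel_values"].to(device)
                # The encode call must stay INSIDE the context: once it exits,
                # the VAE is back on the CPU while `pixel_values` is still on
                # the accelerator, which is the mismatch the one-line fix avoids.
                latents_cache.append(vae.encode(pixel_values))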