From 9b06afba6b446352b9249a7f632af388174dd6ba Mon Sep 17 00:00:00 2001
From: Lev Novitskiy <57654885+leffff@users.noreply.github.com>
Date: Thu, 16 Oct 2025 09:54:00 +0300
Subject: [PATCH] Update src/diffusers/pipelines/kandinsky5/pipeline_kandinsky.py

Co-authored-by: YiYi Xu
---
 src/diffusers/pipelines/kandinsky5/pipeline_kandinsky.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/pipelines/kandinsky5/pipeline_kandinsky.py b/src/diffusers/pipelines/kandinsky5/pipeline_kandinsky.py
index 3d0d68cbe9..58ba3270a5 100644
--- a/src/diffusers/pipelines/kandinsky5/pipeline_kandinsky.py
+++ b/src/diffusers/pipelines/kandinsky5/pipeline_kandinsky.py
@@ -314,7 +314,7 @@ class Kandinsky5T2VPipeline(DiffusionPipeline, KandinskyLoraLoaderMixin):
         attention_mask = inputs["attention_mask"][:, crop_start:]
         cu_seqlens = torch.cumsum(attention_mask.sum(1), dim=0)
         cu_seqlens = torch.cat([torch.zeros_like(cu_seqlens)[:1], cu_seqlens]).to(dtype=torch.int32)
-        embeds = torch.cat([embeds[i].unsqueeze(dim=0).repeat(num_videos_per_prompt, 1, 1) for i in range(batch_size)], dim=0)
+        embeds = embeds.repeat_interleave(num_videos_per_prompt, dim=0)

         return embeds.to(dtype), cu_seqlens
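
Note (not part of the patch): the replacement is behavior-preserving. The old list comprehension duplicated each prompt's embedding num_videos_per_prompt times and concatenated the copies consecutively along the batch dimension, which is exactly what torch.repeat_interleave(..., dim=0) does in one call. A minimal sketch with made-up tensor shapes (the pipeline's real embeddings are not this small) illustrating the equivalence:

import torch

# Hypothetical shapes for illustration only: [batch_size, seq_len, hidden_dim]
batch_size, num_videos_per_prompt = 2, 3
embeds = torch.randn(batch_size, 5, 8)

# Old form: per-item unsqueeze + repeat, then concatenate along the batch dim
old = torch.cat(
    [embeds[i].unsqueeze(dim=0).repeat(num_videos_per_prompt, 1, 1) for i in range(batch_size)],
    dim=0,
)

# New form: repeat each batch element consecutively along dim 0
new = embeds.repeat_interleave(num_videos_per_prompt, dim=0)

# Same values and same [batch_size * num_videos_per_prompt, seq_len, hidden_dim] layout
assert torch.equal(old, new)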