mirror of https://github.com/huggingface/diffusers.git synced 2026-01-27 17:22:53 +03:00
Commit 53a943dca6 (parent e60485498b)
Author: sayakpaul
Date: 2026-01-12 19:35:36 +05:30
4 changed files with 4 additions and 4 deletions

@@ -543,7 +543,7 @@ class Cosmos2_5_PredictBasePipeline(DiffusionPipeline):
         negative_prompt_embeds: torch.Tensor | None = None,
         output_type: str | None = "pil",
         return_dict: bool = True,
-        callback_on_step_end: Callable[[int, int, None], PipelineCallback, MultiPipelineCallbacks] | None = None,
+        callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None = None,
         callback_on_step_end_tensor_inputs: list[str] = ["latents"],
         max_sequence_length: int = 512,
         conditional_frame_timestep: float = 0.1,

@@ -585,7 +585,7 @@ class Kandinsky5I2IPipeline(DiffusionPipeline, KandinskyLoraLoaderMixin):
         negative_prompt_cu_seqlens: torch.Tensor | None = None,
         output_type: str | None = "pil",
         return_dict: bool = True,
-        callback_on_step_end: Callable[[int, int, None], PipelineCallback, MultiPipelineCallbacks] | None = None,
+        callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None = None,
         callback_on_step_end_tensor_inputs: list[str] = ["latents"],
         max_sequence_length: int = 1024,
     ):

@@ -768,7 +768,7 @@ class Kandinsky5I2VPipeline(DiffusionPipeline, KandinskyLoraLoaderMixin):
         negative_prompt_cu_seqlens: torch.Tensor | None = None,
         output_type: str | None = "pil",
         return_dict: bool = True,
-        callback_on_step_end: Callable[[int, int, None], PipelineCallback, MultiPipelineCallbacks] | None = None,
+        callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None = None,
         callback_on_step_end_tensor_inputs: list[str] = ["latents"],
         max_sequence_length: int = 512,
     ):

@@ -552,7 +552,7 @@ class Kandinsky5T2IPipeline(DiffusionPipeline, KandinskyLoraLoaderMixin):
         negative_prompt_cu_seqlens: torch.Tensor | None = None,
         output_type: str | None = "pil",
         return_dict: bool = True,
-        callback_on_step_end: Callable[[int, int, None], PipelineCallback, MultiPipelineCallbacks] | None = None,
+        callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None = None,
         callback_on_step_end_tensor_inputs: list[str] = ["latents"],
         max_sequence_length: int = 512,
     ):
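
All four hunks make the same one-line fix: the malformed three-argument subscript Callable[[int, int, None], PipelineCallback, MultiPipelineCallbacks] becomes a proper union, Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks], so the parameter is typed as either a plain callable or a PipelineCallback / MultiPipelineCallbacks instance. For context only, here is a minimal sketch of how such a step-end callback is passed to a diffusers pipeline; the checkpoint id and prompt are placeholders, and the generic DiffusionPipeline loader stands in for the specific Cosmos and Kandinsky pipelines touched by this commit.

import torch
from diffusers import DiffusionPipeline

# Hypothetical checkpoint id; substitute a real Cosmos or Kandinsky 5 checkpoint.
pipe = DiffusionPipeline.from_pretrained("org/placeholder-checkpoint", torch_dtype=torch.bfloat16)

def on_step_end(pipeline, step, timestep, callback_kwargs):
    # Invoked after each denoising step; only the tensors named in
    # callback_on_step_end_tensor_inputs (here just "latents") are passed in.
    latents = callback_kwargs["latents"]
    print(f"step {step}: latents shape {tuple(latents.shape)}")
    return callback_kwargs

image = pipe(
    "a prompt describing the desired image",
    callback_on_step_end=on_step_end,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]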