mirror of https://github.com/huggingface/diffusers.git
Dhruv Nair
2025-08-06 17:43:21 +02:00
parent ea77fdc4b4
commit 1b4af6b7ef
3 changed files with 12 additions and 27 deletions


@@ -1697,11 +1697,6 @@ class StableDiffusionXLControlNetUnionInputStep(ModularPipelineBlocks):
InputParam("controlnet_conditioning_scale", default=1.0),
InputParam("guess_mode", default=False),
InputParam("num_images_per_prompt", default=1),
]
@property
def intermediate_inputs(self) -> List[InputParam]:
return [
InputParam(
"latents",
required=True,


@@ -179,11 +179,6 @@ class StableDiffusionXLInpaintOverlayMaskStep(ModularPipelineBlocks):
InputParam("image"),
InputParam("mask_image"),
InputParam("padding_mask_crop"),
]
@property
def intermediate_inputs(self) -> List[str]:
return [
InputParam(
"images",
type_hint=Union[List[PIL.Image.Image], List[torch.Tensor], List[np.array]],
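The two hunks above make the same change in two blocks: the list that used to be returned from a separate intermediate_inputs property is folded into the single inputs list. A minimal, hypothetical sketch of that shape change (InputParam here is a stand-in dataclass, not the diffusers implementation, and the lists are abbreviated):

    from dataclasses import dataclass

    @dataclass
    class InputParam:
        # stand-in for diffusers' InputParam, reduced to the fields used in the hunks above
        name: str
        default: object = None
        required: bool = False

    class BlockBefore:
        @property
        def inputs(self):
            return [InputParam("num_images_per_prompt", default=1)]

        @property
        def intermediate_inputs(self):
            return [InputParam("latents", required=True)]

    class BlockAfter:
        @property
        def inputs(self):
            # the former intermediate inputs now live in the same list
            return [
                InputParam("num_images_per_prompt", default=1),
                InputParam("latents", required=True),
            ]

    print([p.name for p in BlockAfter().inputs])  # ['num_images_per_prompt', 'latents']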


@@ -663,12 +663,11 @@ class StableDiffusionXLVaeEncoderStep(ModularPipelineBlocks):
         block_state.device = components._execution_device
         block_state.dtype = block_state.dtype if block_state.dtype is not None else components.vae.dtype
 
-        block_state.image = components.image_processor.preprocess(
+        image = components.image_processor.preprocess(
             block_state.image, height=block_state.height, width=block_state.width, **block_state.preprocess_kwargs
         )
-        block_state.image = block_state.image.to(device=block_state.device, dtype=block_state.dtype)
-        block_state.batch_size = block_state.image.shape[0]
+        image = image.to(device=block_state.device, dtype=block_state.dtype)
+        block_state.batch_size = image.shape[0]
 
         # if generator is a list, make sure the length of it matches the length of images (both should be batch_size)
         if isinstance(block_state.generator, list) and len(block_state.generator) != block_state.batch_size:
@@ -677,9 +676,7 @@ class StableDiffusionXLVaeEncoderStep(ModularPipelineBlocks):
f" size of {block_state.batch_size}. Make sure the batch size matches the length of the generators."
)
block_state.image_latents = self._encode_vae_image(
components, image=block_state.image, generator=block_state.generator
)
block_state.image_latents = self._encode_vae_image(components, image=image, generator=block_state.generator)
self.set_block_state(state, block_state)
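The StableDiffusionXLVaeEncoderStep hunks above, and the inpaint hunk that follows, apply one pattern: values consumed only inside the step stay in local variables, and block_state keeps just the results that later blocks read (batch_size, image_latents). A simplified, hypothetical sketch of the resulting shape, with stand-in helpers replacing components.image_processor.preprocess and self._encode_vae_image:

    import torch
    from types import SimpleNamespace

    def fake_preprocess(image, height=None, width=None):
        # stand-in for components.image_processor.preprocess; a real step would resize/normalize here
        return image

    def fake_encode_vae_image(image, generator=None):
        # stand-in for self._encode_vae_image(components, ...); placeholder transform, not a real VAE
        return image * 0.5

    def vae_encode_step(block_state):
        # intermediates stay local; only batch_size and image_latents are written back
        image = fake_preprocess(block_state.image, height=block_state.height, width=block_state.width)
        image = image.to(device=block_state.device, dtype=block_state.dtype)
        block_state.batch_size = image.shape[0]
        block_state.image_latents = fake_encode_vae_image(image, generator=block_state.generator)
        return block_state

    state = SimpleNamespace(
        image=torch.rand(2, 3, 64, 64), height=64, width=64,
        device="cpu", dtype=torch.float32, generator=None,
    )
    state = vae_encode_step(state)
    print(state.batch_size, tuple(state.image_latents.shape))  # 2 (2, 3, 64, 64)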
@@ -850,34 +847,32 @@ class StableDiffusionXLInpaintVaeEncoderStep(ModularPipelineBlocks):
             block_state.crops_coords = None
             block_state.resize_mode = "default"
 
-        block_state.image = components.image_processor.preprocess(
+        image = components.image_processor.preprocess(
             block_state.image,
             height=block_state.height,
             width=block_state.width,
             crops_coords=block_state.crops_coords,
             resize_mode=block_state.resize_mode,
         )
-        block_state.image = block_state.image.to(dtype=torch.float32)
+        image = image.to(dtype=torch.float32)
 
-        block_state.mask = components.mask_processor.preprocess(
+        mask = components.mask_processor.preprocess(
             block_state.mask_image,
             height=block_state.height,
             width=block_state.width,
             resize_mode=block_state.resize_mode,
             crops_coords=block_state.crops_coords,
         )
-        block_state.masked_image = block_state.image * (block_state.mask < 0.5)
+        block_state.masked_image = image * (mask < 0.5)
 
-        block_state.batch_size = block_state.image.shape[0]
-        block_state.image = block_state.image.to(device=block_state.device, dtype=block_state.dtype)
-        block_state.image_latents = self._encode_vae_image(
-            components, image=block_state.image, generator=block_state.generator
-        )
+        block_state.batch_size = image.shape[0]
+        image = image.to(device=block_state.device, dtype=block_state.dtype)
+        block_state.image_latents = self._encode_vae_image(components, image=image, generator=block_state.generator)
 
         # 7. Prepare mask latent variables
         block_state.mask, block_state.masked_image_latents = self.prepare_mask_latents(
             components,
-            block_state.mask,
+            mask,
             block_state.masked_image,
             block_state.batch_size,
             block_state.height,