mirror of https://github.com/huggingface/diffusers.git

move all the sequential pipelines & auto pipelines to the blocks_presets.py

@@ -358,7 +358,7 @@ except OptionalDependencyNotAvailable:
else:
_import_structure["modular_pipelines"].extend(
[
"StableDiffusionXLAutoPipeline",
"StableDiffusionXLAutoBlocks",
"StableDiffusionXLModularLoader",
]
)

@@ -979,7 +979,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .modular_pipelines import (
StableDiffusionXLAutoPipeline,
StableDiffusionXLAutoBlocks,
StableDiffusionXLModularLoader,
)
from .pipelines import (
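
Both hunks above only swap the exported name (StableDiffusionXLAutoPipeline becomes StableDiffusionXLAutoBlocks) inside diffusers' lazy-import machinery. For context, that pattern works roughly as sketched below; this is an illustrative simplification, not the actual diffusers _LazyModule implementation, and the class and attribute names here are assumptions.

# Simplified sketch of the lazy-import pattern these __init__.py hunks touch.
# Assumption: this mimics, but is not, diffusers' real _LazyModule.
import importlib
import sys
from types import ModuleType


class _LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public symbol to the submodule that defines it.
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_submodule)

    def __getattr__(self, name):
        # Import the defining submodule only on first attribute access.
        submodule = self._symbol_to_submodule.get(name)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, name)


_import_structure = {
    "modular_pipelines": ["StableDiffusionXLAutoBlocks", "StableDiffusionXLModularLoader"],
}
sys.modules[__name__] = _LazyModuleSketch(__name__, _import_structure)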

@@ -39,7 +39,7 @@ else:
"InputParam",
"OutputParam",
]
_import_structure["stable_diffusion_xl"] = ["StableDiffusionXLAutoPipeline", "StableDiffusionXLModularLoader"]
_import_structure["stable_diffusion_xl"] = ["StableDiffusionXLAutoBlocks", "StableDiffusionXLModularLoader"]
_import_structure["components_manager"] = ["ComponentsManager"]

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:

@@ -68,7 +68,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
OutputParam,
)
from .stable_diffusion_xl import (
StableDiffusionXLAutoPipeline,
StableDiffusionXLAutoBlocks,
StableDiffusionXLModularLoader,
)
else:

@@ -21,24 +21,21 @@ except OptionalDependencyNotAvailable:

_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["decoders"] = ["StableDiffusionXLAutoDecodeStep"]
_import_structure["encoders"] = [
"StableDiffusionXLAutoIPAdapterStep",
"StableDiffusionXLAutoVaeEncoderStep",
"StableDiffusionXLTextEncoderStep",
]
_import_structure["modular_block_mappings"] = [
_import_structure["encoders"] = ["StableDiffusionXLTextEncoderStep"]
_import_structure["modular_blocks_presets"] = [
"AUTO_BLOCKS",
"CONTROLNET_BLOCKS",
"CONTROLNET_UNION_BLOCKS",
"IMAGE2IMAGE_BLOCKS",
"INPAINT_BLOCKS",
"IP_ADAPTER_BLOCKS",
"SDXL_SUPPORTED_BLOCKS",
"TEXT2IMAGE_BLOCKS",
"StableDiffusionXLAutoDecodeStep",
"StableDiffusionXLAutoIPAdapterStep",
"StableDiffusionXLAutoVaeEncoderStep",
"StableDiffusionXLAutoBlocks",
]
_import_structure["modular_loader"] = ["StableDiffusionXLModularLoader"]
_import_structure["modular_pipeline_presets"] = ["StableDiffusionXLAutoPipeline"]

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:

@@ -47,24 +44,23 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .decoders import StableDiffusionXLAutoDecodeStep
from .encoders import (
StableDiffusionXLAutoIPAdapterStep,
StableDiffusionXLAutoVaeEncoderStep,
StableDiffusionXLTextEncoderStep,
)
from .modular_block_mappings import (
from .modular_blocks_presets import (
AUTO_BLOCKS,
CONTROLNET_BLOCKS,
CONTROLNET_UNION_BLOCKS,
IMAGE2IMAGE_BLOCKS,
INPAINT_BLOCKS,
IP_ADAPTER_BLOCKS,
SDXL_SUPPORTED_BLOCKS,
TEXT2IMAGE_BLOCKS,
StableDiffusionXLAutoDecodeStep,
StableDiffusionXLAutoIPAdapterStep,
StableDiffusionXLAutoVaeEncoderStep,
StableDiffusionXLAutoBlocks,
)
from .modular_loader import StableDiffusionXLModularLoader
from .modular_pipeline_presets import StableDiffusionXLAutoPipeline
else:
import sys
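
The renamed modular_blocks_presets module now collects the auto blocks together with the named block presets (AUTO_BLOCKS, TEXT2IMAGE_BLOCKS, and the rest). The diff does not show their definitions; conceptually each preset is an ordered mapping from a step name to a block class. The sketch below illustrates that shape with plain OrderedDicts and is an assumption about the layout, not the file's actual contents.

# Illustrative only: a plausible shape for the presets exported by
# modular_blocks_presets.py. The real file may use different containers and steps.
from collections import OrderedDict

TEXT2IMAGE_BLOCKS_SKETCH = OrderedDict(
    [
        ("text_encoder", "StableDiffusionXLTextEncoderStep"),
        ("before_denoise", "StableDiffusionXLBeforeDenoiseStep"),
        ("denoise", "StableDiffusionXLDenoiseStep"),
        ("decode", "StableDiffusionXLDecodeStep"),
    ]
)

AUTO_BLOCKS_SKETCH = OrderedDict(
    [
        ("text_encoder", "StableDiffusionXLTextEncoderStep"),
        ("ip_adapter", "StableDiffusionXLAutoIPAdapterStep"),
        ("image_encoder", "StableDiffusionXLAutoVaeEncoderStep"),
        ("before_denoise", "StableDiffusionXLAutoBeforeDenoiseStep"),
        ("denoise", "StableDiffusionXLAutoDenoiseStep"),
        ("decode", "StableDiffusionXLAutoDecodeStep"),
    ]
)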

@@ -26,10 +26,8 @@ from ...schedulers import EulerDiscreteScheduler
from ...utils import logging
from ...utils.torch_utils import randn_tensor, unwrap_module
from ..modular_pipeline import (
AutoPipelineBlocks,
PipelineBlock,
PipelineState,
SequentialPipelineBlocks,
)
from ..modular_pipeline_utils import ComponentSpec, ConfigSpec, InputParam, OutputParam
from .modular_loader import StableDiffusionXLModularLoader

@@ -1909,110 +1907,3 @@ class StableDiffusionXLControlNetUnionInputStep(PipelineBlock):
self.add_block_state(state, block_state)

return components, state

class StableDiffusionXLControlNetAutoInput(AutoPipelineBlocks):
block_classes = [StableDiffusionXLControlNetUnionInputStep, StableDiffusionXLControlNetInputStep]
block_names = ["controlnet_union", "controlnet"]
block_trigger_inputs = ["control_mode", "control_image"]

@property
def description(self):
return (
"Controlnet Input step that prepare the controlnet input.\n"
+ "This is an auto pipeline block that works for both controlnet and controlnet_union.\n"
+ " - `StableDiffusionXLControlNetUnionInputStep` is called to prepare the controlnet input when `control_mode` and `control_image` are provided.\n"
+ " - `StableDiffusionXLControlNetInputStep` is called to prepare the controlnet input when `control_image` is provided."
)
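
StableDiffusionXLControlNetAutoInput above is a typical AutoPipelineBlocks: block_trigger_inputs pairs with block_classes, and the first sub-block whose trigger input is present wins. The sketch below shows that selection idea in isolation; the method names and the dict-style input lookup are assumptions, not the diffusers implementation.

# Hedged sketch of trigger-based selection in an AutoPipelineBlocks-style class.
class AutoBlocksSketch:
    block_classes = []          # candidate sub-blocks, checked in order
    block_names = []            # parallel human-readable names
    block_trigger_inputs = []   # input that activates each sub-block; None = default

    def select_block(self, provided_inputs):
        for name, cls, trigger in zip(self.block_names, self.block_classes, self.block_trigger_inputs):
            if trigger is None or provided_inputs.get(trigger) is not None:
                return name, cls   # first match wins
        return None, None          # nothing triggered and no default block


class ControlNetAutoInputSketch(AutoBlocksSketch):
    block_classes = ["ControlNetUnionInputStep", "ControlNetInputStep"]
    block_names = ["controlnet_union", "controlnet"]
    block_trigger_inputs = ["control_mode", "control_image"]


# Example: only `control_image` given, so it falls through to the plain controlnet branch.
print(ControlNetAutoInputSketch().select_block({"control_image": object()}))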

# Before denoise
class StableDiffusionXLBeforeDenoiseStep(SequentialPipelineBlocks):
block_classes = [
StableDiffusionXLInputStep,
StableDiffusionXLSetTimestepsStep,
StableDiffusionXLPrepareLatentsStep,
StableDiffusionXLPrepareAdditionalConditioningStep,
StableDiffusionXLControlNetAutoInput,
]
block_names = ["input", "set_timesteps", "prepare_latents", "prepare_add_cond", "controlnet_input"]

@property
def description(self):
return (
"Before denoise step that prepare the inputs for the denoise step.\n"
+ "This is a sequential pipeline blocks:\n"
+ " - `StableDiffusionXLInputStep` is used to adjust the batch size of the model inputs\n"
+ " - `StableDiffusionXLSetTimestepsStep` is used to set the timesteps\n"
+ " - `StableDiffusionXLPrepareLatentsStep` is used to prepare the latents\n"
+ " - `StableDiffusionXLPrepareAdditionalConditioningStep` is used to prepare the additional conditioning\n"
+ " - `StableDiffusionXLControlNetAutoInput` is used to prepare the controlnet input"
)
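
StableDiffusionXLBeforeDenoiseStep composes its sub-blocks with SequentialPipelineBlocks, which simply runs them in order over a shared state. A rough sketch of that contract follows; the callable signature is an assumption based on the `return components, state` lines visible in this diff, not the actual SequentialPipelineBlocks code.

# Rough sketch of sequential composition over a shared pipeline state.
class SequentialBlocksSketch:
    block_classes = []
    block_names = []

    def __call__(self, components, state):
        # Each sub-block reads from and writes to the same pipeline state.
        for block_cls in self.block_classes:
            components, state = block_cls()(components, state)
        return components, state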

class StableDiffusionXLImg2ImgBeforeDenoiseStep(SequentialPipelineBlocks):
block_classes = [
StableDiffusionXLInputStep,
StableDiffusionXLImg2ImgSetTimestepsStep,
StableDiffusionXLImg2ImgPrepareLatentsStep,
StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep,
StableDiffusionXLControlNetAutoInput,
]
block_names = ["input", "set_timesteps", "prepare_latents", "prepare_add_cond", "controlnet_input"]

@property
def description(self):
return (
"Before denoise step that prepare the inputs for the denoise step for img2img task.\n"
+ "This is a sequential pipeline blocks:\n"
+ " - `StableDiffusionXLInputStep` is used to adjust the batch size of the model inputs\n"
+ " - `StableDiffusionXLImg2ImgSetTimestepsStep` is used to set the timesteps\n"
+ " - `StableDiffusionXLImg2ImgPrepareLatentsStep` is used to prepare the latents\n"
+ " - `StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep` is used to prepare the additional conditioning\n"
+ " - `StableDiffusionXLControlNetAutoInput` is used to prepare the controlnet input"
)

class StableDiffusionXLInpaintBeforeDenoiseStep(SequentialPipelineBlocks):
block_classes = [
StableDiffusionXLInputStep,
StableDiffusionXLImg2ImgSetTimestepsStep,
StableDiffusionXLInpaintPrepareLatentsStep,
StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep,
StableDiffusionXLControlNetAutoInput,
]
block_names = ["input", "set_timesteps", "prepare_latents", "prepare_add_cond", "controlnet_input"]

@property
def description(self):
return (
"Before denoise step that prepare the inputs for the denoise step for inpainting task.\n"
+ "This is a sequential pipeline blocks:\n"
+ " - `StableDiffusionXLInputStep` is used to adjust the batch size of the model inputs\n"
+ " - `StableDiffusionXLImg2ImgSetTimestepsStep` is used to set the timesteps\n"
+ " - `StableDiffusionXLInpaintPrepareLatentsStep` is used to prepare the latents\n"
+ " - `StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep` is used to prepare the additional conditioning\n"
+ " - `StableDiffusionXLControlNetAutoInput` is used to prepare the controlnet input"
)

class StableDiffusionXLAutoBeforeDenoiseStep(AutoPipelineBlocks):
block_classes = [
StableDiffusionXLInpaintBeforeDenoiseStep,
StableDiffusionXLImg2ImgBeforeDenoiseStep,
StableDiffusionXLBeforeDenoiseStep,
]
block_names = ["inpaint", "img2img", "text2img"]
block_trigger_inputs = ["mask", "image_latents", None]

@property
def description(self):
return (
"Before denoise step that prepare the inputs for the denoise step.\n"
+ "This is an auto pipeline block that works for text2img, img2img and inpainting tasks as well as controlnet, controlnet_union.\n"
+ " - `StableDiffusionXLInpaintBeforeDenoiseStep` (inpaint) is used when both `mask` and `image_latents` are provided.\n"
+ " - `StableDiffusionXLImg2ImgBeforeDenoiseStep` (img2img) is used when only `image_latents` is provided.\n"
+ " - `StableDiffusionXLBeforeDenoiseStep` (text2img) is used when both `image_latents` and `mask` are not provided.\n"
+ " - `StableDiffusionXLControlNetUnionInputStep` is called to prepare the controlnet input when `control_mode` and `control_image` are provided.\n"
+ " - `StableDiffusionXLControlNetInputStep` is called to prepare the controlnet input when `control_image` is provided."
)

@@ -24,10 +24,8 @@ from ...models import AutoencoderKL
from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
from ...utils import logging
from ..modular_pipeline import (
AutoPipelineBlocks,
PipelineBlock,
PipelineState,
SequentialPipelineBlocks,
)
from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam

@@ -219,32 +217,3 @@ class StableDiffusionXLInpaintOverlayMaskStep(PipelineBlock):
self.add_block_state(state, block_state)

return components, state

class StableDiffusionXLInpaintDecodeStep(SequentialPipelineBlocks):
block_classes = [StableDiffusionXLDecodeStep, StableDiffusionXLInpaintOverlayMaskStep]
block_names = ["decode", "mask_overlay"]

@property
def description(self):
return (
"Inpaint decode step that decode the denoised latents into images outputs.\n"
+ "This is a sequential pipeline blocks:\n"
+ " - `StableDiffusionXLDecodeStep` is used to decode the denoised latents into images\n"
+ " - `StableDiffusionXLInpaintOverlayMaskStep` is used to overlay the mask on the image"
)

class StableDiffusionXLAutoDecodeStep(AutoPipelineBlocks):
block_classes = [StableDiffusionXLInpaintDecodeStep, StableDiffusionXLDecodeStep]
block_names = ["inpaint", "non-inpaint"]
block_trigger_inputs = ["padding_mask_crop", None]

@property
def description(self):
return (
"Decode step that decode the denoised latents into images outputs.\n"
+ "This is an auto pipeline block that works for inpainting and non-inpainting tasks.\n"
+ " - `StableDiffusionXLInpaintDecodeStep` (inpaint) is used when `padding_mask_crop` is provided.\n"
+ " - `StableDiffusionXLDecodeStep` (non-inpaint) is used when `padding_mask_crop` is not provided."
)

@@ -23,7 +23,6 @@ from ...models import ControlNetModel, UNet2DConditionModel
from ...schedulers import EulerDiscreteScheduler
from ...utils import logging
from ..modular_pipeline import (
AutoPipelineBlocks,
BlockState,
LoopSequentialPipelineBlocks,
PipelineBlock,

@@ -49,7 +48,11 @@ class StableDiffusionXLLoopBeforeDenoiser(PipelineBlock):
@property
def description(self) -> str:
return "step within the denoising loop that prepare the latent input for the denoiser. This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)"
return (
"step within the denoising loop that prepare the latent input for the denoiser. "
"This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` "
"object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)"
)

@property
def intermediates_inputs(self) -> List[str]:

@@ -82,7 +85,10 @@ class StableDiffusionXLInpaintLoopBeforeDenoiser(PipelineBlock):
@property
def description(self) -> str:
return "step within the denoising loop that prepare the latent input for the denoiser (for inpainting workflow only). This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` object"
return (
"step within the denoising loop that prepare the latent input for the denoiser (for inpainting workflow only). "
"This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` object"
)

@property
def intermediates_inputs(self) -> List[str]:

@@ -155,7 +161,11 @@ class StableDiffusionXLLoopDenoiser(PipelineBlock):
@property
def description(self) -> str:
return "Step within the denoising loop that denoise the latents with guidance. This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)"
return (
"Step within the denoising loop that denoise the latents with guidance. "
"This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` "
"object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)"
)

@property
def inputs(self) -> List[Tuple[str, Any]]:

@@ -257,7 +267,11 @@ class StableDiffusionXLControlNetLoopDenoiser(PipelineBlock):
@property
def description(self) -> str:
return "step within the denoising loop that denoise the latents with guidance (with controlnet). This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)"
return (
"step within the denoising loop that denoise the latents with guidance (with controlnet). "
"This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` "
"object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)"
)

@property
def inputs(self) -> List[Tuple[str, Any]]:

@@ -446,7 +460,11 @@ class StableDiffusionXLLoopAfterDenoiser(PipelineBlock):
@property
def description(self) -> str:
return "step within the denoising loop that update the latents. This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)"
return (
"step within the denoising loop that update the latents. "
"This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` "
"object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)"
)

@property
def inputs(self) -> List[Tuple[str, Any]]:

@@ -514,7 +532,11 @@ class StableDiffusionXLInpaintLoopAfterDenoiser(PipelineBlock):
@property
def description(self) -> str:
return "step within the denoising loop that update the latents (for inpainting workflow only). This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)"
return (
"step within the denoising loop that update the latents (for inpainting workflow only). "
"This block should be used to compose the `blocks` attribute of a `LoopSequentialPipelineBlocks` "
"object (e.g. `StableDiffusionXLDenoiseLoopWrapper`)"
)

@property
def inputs(self) -> List[Tuple[str, Any]]:

@@ -619,7 +641,10 @@ class StableDiffusionXLDenoiseLoopWrapper(LoopSequentialPipelineBlocks):
@property
def description(self) -> str:
return "Pipeline block that iteratively denoise the latents over `timesteps`. The specific steps with each iteration can be customized with `blocks` attributes"
return (
"Pipeline block that iteratively denoise the latents over `timesteps`. "
"The specific steps with each iteration can be customized with `blocks` attributes"
)

@property
def loop_expected_components(self) -> List[ComponentSpec]:
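
StableDiffusionXLDenoiseLoopWrapper builds on LoopSequentialPipelineBlocks: the wrapper owns the timestep loop and runs its sub-blocks once per iteration. The sketch below captures only that idea; the per-iteration call signature and the dict-style state access are assumptions, not the real API.

# Illustrative loop wrapper: run every sub-block once per timestep.
class DenoiseLoopWrapperSketch:
    block_classes = []  # e.g. [before_denoiser, denoiser, after_denoiser]

    def __call__(self, components, state):
        for t in state.get("timesteps", []):
            for block_cls in self.block_classes:
                components, state = block_cls()(components, state, t)
        return components, state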

@@ -679,7 +704,7 @@ class StableDiffusionXLDenoiseLoopWrapper(LoopSequentialPipelineBlocks):

# composing the denoising loops
class StableDiffusionXLDenoiseLoop(StableDiffusionXLDenoiseLoopWrapper):
class StableDiffusionXLDenoiseStep(StableDiffusionXLDenoiseLoopWrapper):
block_classes = [
StableDiffusionXLLoopBeforeDenoiser,
StableDiffusionXLLoopDenoiser,

@@ -696,11 +721,12 @@ class StableDiffusionXLDenoiseLoop(StableDiffusionXLDenoiseLoopWrapper):
" - `StableDiffusionXLLoopBeforeDenoiser`\n"
" - `StableDiffusionXLLoopDenoiser`\n"
" - `StableDiffusionXLLoopAfterDenoiser`\n"
"This block supports both text2img and img2img tasks."
)

# control_cond
class StableDiffusionXLControlNetDenoiseLoop(StableDiffusionXLDenoiseLoopWrapper):
class StableDiffusionXLControlNetDenoiseStep(StableDiffusionXLDenoiseLoopWrapper):
block_classes = [
StableDiffusionXLLoopBeforeDenoiser,
StableDiffusionXLControlNetLoopDenoiser,

@@ -717,11 +743,12 @@ class StableDiffusionXLControlNetDenoiseLoop(StableDiffusionXLDenoiseLoopWrapper
" - `StableDiffusionXLLoopBeforeDenoiser`\n"
" - `StableDiffusionXLControlNetLoopDenoiser`\n"
" - `StableDiffusionXLLoopAfterDenoiser`\n"
"This block supports using controlnet for both text2img and img2img tasks."
)

# mask
class StableDiffusionXLInpaintDenoiseLoop(StableDiffusionXLDenoiseLoopWrapper):
class StableDiffusionXLInpaintDenoiseStep(StableDiffusionXLDenoiseLoopWrapper):
block_classes = [
StableDiffusionXLInpaintLoopBeforeDenoiser,
StableDiffusionXLLoopDenoiser,

@@ -738,11 +765,12 @@ class StableDiffusionXLInpaintDenoiseLoop(StableDiffusionXLDenoiseLoopWrapper):
" - `StableDiffusionXLInpaintLoopBeforeDenoiser`\n"
" - `StableDiffusionXLLoopDenoiser`\n"
" - `StableDiffusionXLInpaintLoopAfterDenoiser`\n"
"This block only supports inpainting tasks."
)

# control_cond + mask
class StableDiffusionXLInpaintControlNetDenoiseLoop(StableDiffusionXLDenoiseLoopWrapper):
class StableDiffusionXLInpaintControlNetDenoiseStep(StableDiffusionXLDenoiseLoopWrapper):
block_classes = [
StableDiffusionXLInpaintLoopBeforeDenoiser,
StableDiffusionXLControlNetLoopDenoiser,

@@ -759,52 +787,5 @@ class StableDiffusionXLInpaintControlNetDenoiseLoop(StableDiffusionXLDenoiseLoop
" - `StableDiffusionXLInpaintLoopBeforeDenoiser`\n"
" - `StableDiffusionXLControlNetLoopDenoiser`\n"
" - `StableDiffusionXLInpaintLoopAfterDenoiser`\n"
)

# all task without controlnet
class StableDiffusionXLDenoiseStep(AutoPipelineBlocks):
block_classes = [StableDiffusionXLInpaintDenoiseLoop, StableDiffusionXLDenoiseLoop]
block_names = ["inpaint_denoise", "denoise"]
block_trigger_inputs = ["mask", None]

@property
def description(self) -> str:
return (
"Denoise step that iteratively denoise the latents. "
"This is a auto pipeline block that works for text2img, img2img and inpainting tasks."
" - `StableDiffusionXLDenoiseStep` (denoise) is used when no mask is provided."
" - `StableDiffusionXLInpaintDenoiseStep` (inpaint_denoise) is used when mask is provided."
)

# all task with controlnet
class StableDiffusionXLControlNetDenoiseStep(AutoPipelineBlocks):
block_classes = [StableDiffusionXLInpaintControlNetDenoiseLoop, StableDiffusionXLControlNetDenoiseLoop]
block_names = ["inpaint_controlnet_denoise", "controlnet_denoise"]
block_trigger_inputs = ["mask", None]

@property
def description(self) -> str:
return (
"Denoise step that iteratively denoise the latents with controlnet. "
"This is a auto pipeline block that works for text2img, img2img and inpainting tasks."
" - `StableDiffusionXLControlNetDenoiseStep` (controlnet_denoise) is used when no mask is provided."
" - `StableDiffusionXLInpaintControlNetDenoiseStep` (inpaint_controlnet_denoise) is used when mask is provided."
)

# all task with or without controlnet
class StableDiffusionXLAutoDenoiseStep(AutoPipelineBlocks):
block_classes = [StableDiffusionXLControlNetDenoiseStep, StableDiffusionXLDenoiseStep]
block_names = ["controlnet_denoise", "denoise"]
block_trigger_inputs = ["controlnet_cond", None]

@property
def description(self) -> str:
return (
"Denoise step that iteratively denoise the latents. "
"This is a auto pipeline block that works for text2img, img2img and inpainting tasks. And can be used with or without controlnet."
" - `StableDiffusionXLDenoiseStep` (denoise) is used when no controlnet_cond is provided (work for text2img, img2img and inpainting tasks)."
" - `StableDiffusionXLControlNetDenoiseStep` (controlnet_denoise) is used when controlnet_cond is provided (work for text2img, img2img and inpainting tasks)."
"This block only supports using controlnet for inpainting tasks."
)
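
The renames above (the ...DenoiseLoop classes become ...DenoiseStep) and the auto blocks that get removed from this file mean denoising is resolved in two layers: first on controlnet_cond, then on mask. A hypothetical helper that mirrors the resulting dispatch follows; the function and names are invented for illustration and are not part of diffusers.

# Hypothetical helper mirroring how the nested auto denoise blocks resolve.
def resolve_denoise_path(provided_inputs):
    outer = "controlnet_denoise" if "controlnet_cond" in provided_inputs else "denoise"
    inner = "inpaint" if "mask" in provided_inputs else "default"
    return outer, inner


print(resolve_denoise_path({"mask", "controlnet_cond"}))  # ('controlnet_denoise', 'inpaint')
print(resolve_denoise_path(set()))                        # ('denoise', 'default')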

@@ -26,7 +26,7 @@ from transformers import (
from ...configuration_utils import FrozenDict
from ...guiders import ClassifierFreeGuidance
from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...loaders import ModularIPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
from ...loaders import StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
from ...models.lora import adjust_lora_scale_text_encoder
from ...utils import (

@@ -35,7 +35,7 @@ from ...utils import (
scale_lora_layers,
unscale_lora_layers,
)
from ..modular_pipeline import AutoPipelineBlocks, PipelineBlock, PipelineState
from ..modular_pipeline import PipelineBlock, PipelineState
from ..modular_pipeline_utils import ComponentSpec, ConfigSpec, InputParam, OutputParam
from .modular_loader import StableDiffusionXLModularLoader

@@ -893,30 +893,3 @@ class StableDiffusionXLInpaintVaeEncoderStep(PipelineBlock):
self.add_block_state(state, block_state)

return components, state

# auto blocks (YiYi TODO: maybe move all the auto blocks to a separate file)
# Encode
class StableDiffusionXLAutoVaeEncoderStep(AutoPipelineBlocks):
block_classes = [StableDiffusionXLInpaintVaeEncoderStep, StableDiffusionXLVaeEncoderStep]
block_names = ["inpaint", "img2img"]
block_trigger_inputs = ["mask_image", "image"]

@property
def description(self):
return (
"Vae encoder step that encode the image inputs into their latent representations.\n"
+ "This is an auto pipeline block that works for both inpainting and img2img tasks.\n"
+ " - `StableDiffusionXLInpaintVaeEncoderStep` (inpaint) is used when both `mask_image` and `image` are provided.\n"
+ " - `StableDiffusionXLVaeEncoderStep` (img2img) is used when only `image` is provided."
)

class StableDiffusionXLAutoIPAdapterStep(AutoPipelineBlocks, ModularIPAdapterMixin):
block_classes = [StableDiffusionXLIPAdapterStep]
block_names = ["ip_adapter"]
block_trigger_inputs = ["ip_adapter_image"]

@property
def description(self):
return "Run IP Adapter step if `ip_adapter_image` is provided."
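
StableDiffusionXLAutoIPAdapterStep is the degenerate AutoPipelineBlocks case: a single sub-block with a trigger and no None fallback, i.e. an optional step that is skipped entirely unless `ip_adapter_image` is passed. A hedged sketch of that behaviour, with assumed names and state access:

# Optional-step sketch: run the wrapped block only if its trigger input is present.
class OptionalStepSketch:
    block_class = None
    trigger_input = None

    def __call__(self, components, state):
        if state.get(self.trigger_input) is None:
            return components, state  # trigger absent, so this step is a no-op
        return self.block_class()(components, state)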

@@ -2,7 +2,7 @@
from ..utils import DummyObject, requires_backends

class StableDiffusionXLAutoPipeline(metaclass=DummyObject):
class StableDiffusionXLAutoBlocks(metaclass=DummyObject):
_backends = ["torch", "transformers"]

def __init__(self, *args, **kwargs):