From 4d40ea38979650146d342ece8df54ec98b7f616b Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Fri, 18 Apr 2025 11:24:47 +0200 Subject: [PATCH] update --- .../pipeline_animatediff_controlnet.py | 4 ++-- .../pipeline_animatediff_img2video.py | 2 +- .../community/pipeline_animatediff_ipex.py | 4 ++-- examples/community/pipeline_stg_cogvideox.py | 2 +- .../community/unclip_image_interpolation.py | 10 ++++----- .../community/unclip_text_interpolation.py | 8 +++---- ...convert_kakao_brain_unclip_to_diffusers.py | 2 +- .../models/unets/unet_stable_cascade.py | 2 +- .../animatediff/pipeline_animatediff_sdxl.py | 4 ++-- .../pipeline_animatediff_sparsectrl.py | 4 ++-- .../pipelines/cogvideo/pipeline_cogvideox.py | 2 +- .../cogview3/pipeline_cogview3plus.py | 2 +- .../i2vgen_xl/pipeline_i2vgen_xl.py | 2 +- .../deprecated/latte/pipeline_latte.py | 2 +- .../pipelines/deprecated/pia/pipeline_pia.py | 4 ++-- .../pipeline_semantic_stable_diffusion.py | 2 +- .../deprecated/shap_e/pipeline_shap_e.py | 2 +- .../shap_e/pipeline_shap_e_img2img.py | 2 +- ...ipeline_stable_diffusion_xl_k_diffusion.py | 4 ++-- .../pipeline_stable_diffusion_safe.py | 2 +- .../pipeline_stable_diffusion_sag.py | 2 +- .../pipeline_text_to_video_synth.py | 2 +- .../pipeline_text_to_video_synth_img2img.py | 2 +- .../pipeline_text_to_video_zero.py | 2 +- .../pipeline_text_to_video_zero_sdxl.py | 22 +++++++++---------- .../unclip/pipeline_unclip_image_variation.py | 2 +- .../wuerstchen/pipeline_wuerstchen.py | 2 +- .../wuerstchen/pipeline_wuerstchen_prior.py | 2 +- .../pipelines/kandinsky/pipeline_kandinsky.py | 2 +- .../kandinsky/pipeline_kandinsky_inpaint.py | 2 +- .../kandinsky/pipeline_kandinsky_prior.py | 2 +- .../kandinsky2_2/pipeline_kandinsky2_2.py | 2 +- .../pipeline_kandinsky2_2_controlnet.py | 2 +- .../pipeline_kandinsky2_2_inpainting.py | 2 +- .../pipeline_kandinsky2_2_prior.py | 2 +- .../pag/pipeline_pag_sd_animatediff.py | 2 +- .../pipeline_stable_unclip.py | 4 ++-- 
tests/pipelines/unclip/test_unclip.py | 2 +- .../unclip/test_unclip_image_variation.py | 2 +- 39 files changed, 63 insertions(+), 63 deletions(-) diff --git a/examples/community/pipeline_animatediff_controlnet.py b/examples/community/pipeline_animatediff_controlnet.py index 9f99ad248b..657d58f46d 100644 --- a/examples/community/pipeline_animatediff_controlnet.py +++ b/examples/community/pipeline_animatediff_controlnet.py @@ -436,7 +436,7 @@ class AnimateDiffControlNetPipeline( image_embeds = ip_adapter_image_embeds return image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents @@ -663,7 +663,7 @@ class AnimateDiffControlNetPipeline( f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/examples/community/pipeline_animatediff_img2video.py b/examples/community/pipeline_animatediff_img2video.py index f7f0cf31c5..63d0111db1 100644 --- a/examples/community/pipeline_animatediff_img2video.py +++ b/examples/community/pipeline_animatediff_img2video.py @@ -553,7 +553,7 @@ class AnimateDiffImgToVideoPipeline( image_embeds = ip_adapter_image_embeds return image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents diff --git a/examples/community/pipeline_animatediff_ipex.py b/examples/community/pipeline_animatediff_ipex.py index 06508f217c..90835e671b 100644 --- a/examples/community/pipeline_animatediff_ipex.py +++ b/examples/community/pipeline_animatediff_ipex.py @@ -425,7 +425,7 @@ class AnimateDiffPipelineIpex( return image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents @@ -520,7 +520,7 @@ class AnimateDiffPipelineIpex( f"`ip_adapter_image_embeds` has to be a list of 3D 
or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" ) - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/examples/community/pipeline_stg_cogvideox.py b/examples/community/pipeline_stg_cogvideox.py index 2e7f7906a3..ef387b3848 100644 --- a/examples/community/pipeline_stg_cogvideox.py +++ b/examples/community/pipeline_stg_cogvideox.py @@ -427,7 +427,7 @@ class CogVideoXSTGPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.latte.pipeline_latte.LattePipeline.check_inputs def check_inputs( self, prompt, diff --git a/examples/community/unclip_image_interpolation.py b/examples/community/unclip_image_interpolation.py index 210bd61ecd..b66fef981d 100644 --- a/examples/community/unclip_image_interpolation.py +++ b/examples/community/unclip_image_interpolation.py @@ -18,7 +18,7 @@ from diffusers import ( UNet2DConditionModel, UNet2DModel, ) -from diffusers.pipelines.unclip import UnCLIPTextProjModel +from diffusers.pipelines.deprecated.unclip import UnCLIPTextProjModel from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor @@ -84,7 +84,7 @@ class UnCLIPImageInterpolationPipeline(DiffusionPipeline): decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler - # Copied from diffusers.pipelines.unclip.pipeline_unclip_image_variation.UnCLIPImageVariationPipeline.__init__ + # Copied from 
diffusers.pipelines.deprecated.unclip.pipeline_unclip_image_variation.UnCLIPImageVariationPipeline.__init__ def __init__( self, decoder: UNet2DConditionModel, @@ -113,7 +113,7 @@ class UnCLIPImageInterpolationPipeline(DiffusionPipeline): super_res_scheduler=super_res_scheduler, ) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) @@ -125,7 +125,7 @@ class UnCLIPImageInterpolationPipeline(DiffusionPipeline): latents = latents * scheduler.init_noise_sigma return latents - # Copied from diffusers.pipelines.unclip.pipeline_unclip_image_variation.UnCLIPImageVariationPipeline._encode_prompt + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip_image_variation.UnCLIPImageVariationPipeline._encode_prompt def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): batch_size = len(prompt) if isinstance(prompt, list) else 1 @@ -189,7 +189,7 @@ class UnCLIPImageInterpolationPipeline(DiffusionPipeline): return prompt_embeds, text_encoder_hidden_states, text_mask - # Copied from diffusers.pipelines.unclip.pipeline_unclip_image_variation.UnCLIPImageVariationPipeline._encode_image + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip_image_variation.UnCLIPImageVariationPipeline._encode_image def _encode_image(self, image, device, num_images_per_prompt, image_embeddings: Optional[torch.Tensor] = None): dtype = next(self.image_encoder.parameters()).dtype diff --git a/examples/community/unclip_text_interpolation.py b/examples/community/unclip_text_interpolation.py index 84f1c5a21f..24166d7c15 100644 --- a/examples/community/unclip_text_interpolation.py +++ b/examples/community/unclip_text_interpolation.py 
@@ -14,7 +14,7 @@ from diffusers import ( UNet2DConditionModel, UNet2DModel, ) -from diffusers.pipelines.unclip import UnCLIPTextProjModel +from diffusers.pipelines.deprecated.unclip import UnCLIPTextProjModel from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor @@ -78,7 +78,7 @@ class UnCLIPTextInterpolationPipeline(DiffusionPipeline): decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.__init__ + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.__init__ def __init__( self, prior: PriorTransformer, @@ -107,7 +107,7 @@ class UnCLIPTextInterpolationPipeline(DiffusionPipeline): super_res_scheduler=super_res_scheduler, ) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) @@ -119,7 +119,7 @@ class UnCLIPTextInterpolationPipeline(DiffusionPipeline): latents = latents * scheduler.init_noise_sigma return latents - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt def _encode_prompt( self, prompt, diff --git a/scripts/convert_kakao_brain_unclip_to_diffusers.py b/scripts/convert_kakao_brain_unclip_to_diffusers.py index 5135eaed5b..1727005189 100644 --- a/scripts/convert_kakao_brain_unclip_to_diffusers.py +++ b/scripts/convert_kakao_brain_unclip_to_diffusers.py @@ -7,7 +7,7 @@ from transformers import CLIPTextModelWithProjection, CLIPTokenizer from diffusers import UnCLIPPipeline, UNet2DConditionModel, UNet2DModel from diffusers.models.transformers.prior_transformer 
import PriorTransformer -from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel +from diffusers.pipelines.deprecated.unclip.text_proj import UnCLIPTextProjModel from diffusers.schedulers.scheduling_unclip import UnCLIPScheduler diff --git a/src/diffusers/models/unets/unet_stable_cascade.py b/src/diffusers/models/unets/unet_stable_cascade.py index f57754435f..308b1eca02 100644 --- a/src/diffusers/models/unets/unet_stable_cascade.py +++ b/src/diffusers/models/unets/unet_stable_cascade.py @@ -27,7 +27,7 @@ from ..attention_processor import Attention from ..modeling_utils import ModelMixin -# Copied from diffusers.pipelines.wuerstchen.modeling_wuerstchen_common.WuerstchenLayerNorm with WuerstchenLayerNorm -> SDCascadeLayerNorm +# Copied from diffusers.pipelines.deprecated.wuerstchen.modeling_wuerstchen_common.WuerstchenLayerNorm with WuerstchenLayerNorm -> SDCascadeLayerNorm class SDCascadeLayerNorm(nn.LayerNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) diff --git a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py index 958eb5fb51..cd990e7e23 100644 --- a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +++ b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py @@ -635,7 +635,7 @@ class AnimateDiffSDXLPipeline( return ip_adapter_image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents @@ -738,7 +738,7 @@ class AnimateDiffSDXLPipeline( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. 
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py index 8c51fddcd5..757f147361 100644 --- a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +++ b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py @@ -458,7 +458,7 @@ class AnimateDiffSparseControlNetPipeline( return ip_adapter_image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents @@ -621,7 +621,7 @@ class AnimateDiffSparseControlNetPipeline( f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py index 99ae9025cd..7f09befde3 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py @@ -373,7 +373,7 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.latte.pipeline_latte.LattePipeline.check_inputs def check_inputs( self, prompt, diff --git a/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py b/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py index 0cd3943fbc..256e10ed72 100644 --- a/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py +++ b/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py @@ -333,7 +333,7 @@ class CogView3PlusPipeline(DiffusionPipeline): extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.latte.pipeline_latte.LattePipeline.check_inputs def check_inputs( self, prompt, diff --git a/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py b/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py index df8e6b195d..e7e6038d9e 100644 --- 
a/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py +++ b/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py @@ -479,7 +479,7 @@ class I2VGenXLPipeline( return image_latents - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/src/diffusers/pipelines/deprecated/latte/pipeline_latte.py b/src/diffusers/pipelines/deprecated/latte/pipeline_latte.py index 0117f0e7b6..6f4068fc24 100644 --- a/src/diffusers/pipelines/deprecated/latte/pipeline_latte.py +++ b/src/diffusers/pipelines/deprecated/latte/pipeline_latte.py @@ -561,7 +561,7 @@ class LattePipeline(DiffusionPipeline): return caption.strip() - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py b/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py index 6401829a8b..d60def5136 100644 --- a/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py +++ b/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py @@ -422,7 +422,7 @@ class PIAPipeline( return image_embeds, uncond_image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from 
diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents @@ -562,7 +562,7 @@ class PIAPipeline( return ip_adapter_image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py index a09160b419..80c2234745 100644 --- a/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +++ b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py @@ -143,7 +143,7 @@ class SemanticStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs def check_inputs( self, prompt, diff --git a/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e.py b/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e.py index 1018cefa81..7cf2bb2c8d 100644 --- a/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e.py +++ b/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e.py @@ -127,7 +127,7 @@ class 
ShapEPipeline(DiffusionPipeline): shap_e_renderer=shap_e_renderer, ) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e_img2img.py b/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e_img2img.py index 3b3e1e8467..3114059d47 100644 --- a/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e_img2img.py +++ b/src/diffusers/pipelines/deprecated/shap_e/pipeline_shap_e_img2img.py @@ -128,7 +128,7 @@ class ShapEImg2ImgPipeline(DiffusionPipeline): shap_e_renderer=shap_e_renderer, ) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py b/src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py index 27b8e788dc..dc520600c9 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py @@ -72,7 +72,7 @@ EXAMPLE_DOC_STRING = """ """ -# Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.ModelWrapper +# Copied from 
diffusers.pipelines.deprecated.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.ModelWrapper class ModelWrapper: def __init__(self, model, alphas_cumprod): self.model = model @@ -187,7 +187,7 @@ class StableDiffusionXLKDiffusionPipeline( else: self.k_diffusion_model = CompVisDenoiser(model) - # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.set_scheduler + # Copied from diffusers.pipelines.deprecated.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.set_scheduler def set_scheduler(self, scheduler_type: str): library = importlib.import_module("k_diffusion") sampling = getattr(library, "sampling") diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_stable_diffusion_safe.py b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_stable_diffusion_safe.py index d07650ff76..9a198d7e6a 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_stable_diffusion_safe.py @@ -372,7 +372,7 @@ class StableDiffusionPipelineSafe(DiffusionPipeline, StableDiffusionMixin, IPAda extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs def check_inputs( self, prompt, diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_sag/pipeline_stable_diffusion_sag.py b/src/diffusers/pipelines/deprecated/stable_diffusion_sag/pipeline_stable_diffusion_sag.py index ac7b66909d..9bef2192c9 100644 ---
a/src/diffusers/pipelines/deprecated/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_sag/pipeline_stable_diffusion_sag.py @@ -490,7 +490,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, StableDiffusionMixin, Textua extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs def check_inputs( self, prompt, diff --git a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth.py index da7f9d3e6b..eb0a85e429 100644 --- a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth.py @@ -363,7 +363,7 @@ class TextToVideoSDPipeline( extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs def check_inputs( self, prompt, diff --git a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py index de17cdd847..513026a0d8 100644 --- a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +++ 
b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py @@ -368,7 +368,7 @@ class VideoToVideoSDPipeline( return prompt_embeds, negative_prompt_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents diff --git a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero.py index 4bf745d499..7fb295a099 100644 --- a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero.py @@ -464,7 +464,7 @@ class TextToVideoZeroPipeline( return latents.clone().detach() - # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs def check_inputs( self, prompt, diff --git a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py index 51f05883b1..5ff541f705 100644 --- a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py @@ -55,32 +55,32 @@ else: logger = logging.get_logger(__name__) # pylint: disable=invalid-name -# Copied from 
diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_0 +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_0 def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_1 +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_1 def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_3 +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_3 def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_4 +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_4 def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. 
@@ -140,7 +140,7 @@ class CrossFrameAttnProcessor: return hidden_states -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor2_0 +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor2_0 class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. @@ -230,7 +230,7 @@ class TextToVideoSDXLPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.coords_grid +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.coords_grid def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) @@ -238,7 +238,7 @@ def coords_grid(batch, ht, wd, device): return coords[None].repeat(batch, 1, 1, 1) -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.warp_single_latent +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.warp_single_latent def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow @@ -266,7 +266,7 @@ def warp_single_latent(latent, reference_flow): return warped -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field @@ -290,7 +290,7 @@ def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ return reference_flow -# Copied from 
diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field_and_warp_latents +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field_and_warp_latents def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly @@ -832,7 +832,7 @@ class TextToVideoZeroSDXLPipeline( return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoZeroPipeline.forward_loop + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoZeroPipeline.forward_loop def forward_loop(self, x_t0, t0, t1, generator): """ Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. diff --git a/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py index 1e1787362e..816e73c770 100644 --- a/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py +++ b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py @@ -114,7 +114,7 @@ class UnCLIPImageVariationPipeline(DiffusionPipeline): super_res_scheduler=super_res_scheduler, ) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py 
b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py index 43861bb9b6..293794d3ea 100644 --- a/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py @@ -107,7 +107,7 @@ class WuerstchenDecoderPipeline(DiffusionPipeline): ) self.register_to_config(latent_dim_scale=latent_dim_scale) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py index 3c086c1878..82df873872 100644 --- a/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py @@ -126,7 +126,7 @@ class WuerstchenPriorPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin) latent_mean=latent_mean, latent_std=latent_std, resolution_multiple=resolution_multiple ) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py index b5f4acf5c0..e63c4c3eff 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py @@ -123,7 +123,7 @@ class 
KandinskyPipeline(DiffusionPipeline): ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py index 769c834ec3..c77318b378 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -288,7 +288,7 @@ class KandinskyInpaintPipeline(DiffusionPipeline): self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) self._warn_has_been_called = False - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py index a348deef8b..7ef00fb655 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py @@ -285,7 +285,7 @@ class KandinskyPriorPipeline(DiffusionPipeline): return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from 
diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py index a584674540..f90c331d0b 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py @@ -103,7 +103,7 @@ class KandinskyV22Pipeline(DiffusionPipeline): ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py index bada59080c..1e160db25a 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py @@ -145,7 +145,7 @@ class KandinskyV22ControlnetPipeline(DiffusionPipeline): ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git 
a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py index 482093a4bb..5bfd1b6de9 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -275,7 +275,7 @@ class KandinskyV22InpaintPipeline(DiffusionPipeline): self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) self._warn_has_been_called = False - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py index d05a7fbdb1..b40b4b435d 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py @@ -244,7 +244,7 @@ class KandinskyV22PriorPipeline(DiffusionPipeline): return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py index d3a015e569..8942c4af0e 100644 --- 
a/src/diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py @@ -452,7 +452,7 @@ class AnimateDiffPAGPipeline( extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.pia.pipeline_pia.PIAPipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.pia.pipeline_pia.PIAPipeline.check_inputs def check_inputs( self, prompt, diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py index be01e0acbf..327eecfed6 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py @@ -166,7 +166,7 @@ class StableUnCLIPPipeline( self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder def _encode_prior_prompt( self, prompt, @@ -584,7 +584,7 @@ class StableUnCLIPPipeline( f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive." 
) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/tests/pipelines/unclip/test_unclip.py b/tests/pipelines/unclip/test_unclip.py index 834d97f30d..005509c688 100644 --- a/tests/pipelines/unclip/test_unclip.py +++ b/tests/pipelines/unclip/test_unclip.py @@ -21,7 +21,7 @@ import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import PriorTransformer, UnCLIPPipeline, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel -from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel +from diffusers.pipelines.deprecated.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, diff --git a/tests/pipelines/unclip/test_unclip_image_variation.py b/tests/pipelines/unclip/test_unclip_image_variation.py index e402629fe1..2e079a125d 100644 --- a/tests/pipelines/unclip/test_unclip_image_variation.py +++ b/tests/pipelines/unclip/test_unclip_image_variation.py @@ -35,7 +35,7 @@ from diffusers import ( UNet2DConditionModel, UNet2DModel, ) -from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel +from diffusers.pipelines.deprecated.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor,