From 3028089e5e6554a7e20a07d87153228d39f91695 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?M=2E=20Tolga=20Cang=C3=B6z?= <46008593+standardAI@users.noreply.github.com>
Date: Thu, 21 Mar 2024 04:46:47 +0300
Subject: [PATCH] Fix typos (#7411)

* Fix typos

* Fix typo in SVD.md
---
 docs/source/en/using-diffusers/svd.md                       | 4 ++--
 examples/community/unclip_text_interpolation.py             | 2 +-
 .../pipelines/kandinsky/pipeline_kandinsky_combined.py      | 6 +++---
 .../pipelines/kandinsky/pipeline_kandinsky_prior.py         | 2 +-
 .../kandinsky2_2/pipeline_kandinsky2_2_combined.py          | 6 +++---
 .../pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py   | 2 +-
 .../kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py     | 2 +-
 src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py   | 2 +-
 .../stable_diffusion/pipeline_stable_diffusion_depth2img.py | 4 ++--
 .../pipeline_stable_diffusion_instruct_pix2pix.py           | 2 +-
 .../pipelines/stable_diffusion/pipeline_stable_unclip.py    | 2 +-
 .../pipeline_stable_diffusion_xl_instruct_pix2pix.py        | 2 +-
 src/diffusers/schedulers/scheduling_consistency_models.py   | 2 +-
 src/diffusers/schedulers/scheduling_deis_multistep.py       | 2 +-
 src/diffusers/schedulers/scheduling_dpmsolver_multistep.py  | 2 +-
 src/diffusers/schedulers/scheduling_dpmsolver_sde.py        | 2 +-
 src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py | 2 +-
 .../schedulers/scheduling_edm_dpmsolver_multistep.py        | 2 +-
 src/diffusers/schedulers/scheduling_edm_euler.py            | 2 +-
 .../schedulers/scheduling_euler_ancestral_discrete.py       | 2 +-
 src/diffusers/schedulers/scheduling_euler_discrete.py       | 2 +-
 src/diffusers/schedulers/scheduling_heun_discrete.py        | 2 +-
 .../schedulers/scheduling_k_dpm_2_ancestral_discrete.py     | 2 +-
 src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py     | 2 +-
 src/diffusers/schedulers/scheduling_lms_discrete.py         | 2 +-
 src/diffusers/schedulers/scheduling_unipc_multistep.py      | 2 +-
 26 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/docs/source/en/using-diffusers/svd.md b/docs/source/en/using-diffusers/svd.md
index c9c51f55f0..cbb74d7b10 100644
--- a/docs/source/en/using-diffusers/svd.md
+++ b/docs/source/en/using-diffusers/svd.md
@@ -21,7 +21,7 @@ This guide will show you how to use SVD to generate short videos from images.
 Before you begin, make sure you have the following libraries installed:
 
 ```py
-!pip install -q -U diffusers transformers accelerate 
+!pip install -q -U diffusers transformers accelerate
 ```
 
 There are two variants of this model, [SVD](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid) and [SVD-XT](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt). The SVD checkpoint is trained to generate 14 frames and the SVD-XT checkpoint is further finetuned to generate 25 frames.
@@ -86,7 +86,7 @@ Video generation is very memory intensive because you're essentially generating
 + frames = pipe(image, decode_chunk_size=2, generator=generator, num_frames=25).frames[0]
 ```
 
-Using all these tricks togethere should lower the memory requirement to less than 8GB VRAM.
+Using all these tricks together should lower the memory requirement to less than 8GB VRAM.
 
 ## Micro-conditioning
 
diff --git a/examples/community/unclip_text_interpolation.py b/examples/community/unclip_text_interpolation.py
index a92ecc8b87..84f1c5a21f 100644
--- a/examples/community/unclip_text_interpolation.py
+++ b/examples/community/unclip_text_interpolation.py
@@ -48,7 +48,7 @@ class UnCLIPTextInterpolationPipeline(DiffusionPipeline):
             Tokenizer of class
             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
         prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         text_proj ([`UnCLIPTextProjModel`]):
             Utility class to prepare and combine the embeddings before they are passed to the decoder.
         decoder ([`UNet2DConditionModel`]):
diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
index da5ff52ed1..6b1ed62f8a 100644
--- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
+++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
@@ -129,7 +129,7 @@ class KandinskyCombinedPipeline(DiffusionPipeline):
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
         prior_prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         prior_image_encoder ([`CLIPVisionModelWithProjection`]):
             Frozen image-encoder.
         prior_text_encoder ([`CLIPTextModelWithProjection`]):
@@ -346,7 +346,7 @@ class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline):
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
         prior_prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         prior_image_encoder ([`CLIPVisionModelWithProjection`]):
             Frozen image-encoder.
         prior_text_encoder ([`CLIPTextModelWithProjection`]):
@@ -586,7 +586,7 @@ class KandinskyInpaintCombinedPipeline(DiffusionPipeline):
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
         prior_prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         prior_image_encoder ([`CLIPVisionModelWithProjection`]):
             Frozen image-encoder.
         prior_text_encoder ([`CLIPTextModelWithProjection`]):
diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py
index 0d9f543496..7d9be4570f 100644
--- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py
+++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py
@@ -134,7 +134,7 @@ class KandinskyPriorPipeline(DiffusionPipeline):
 
     Args:
         prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         image_encoder ([`CLIPVisionModelWithProjection`]):
             Frozen image-encoder.
         text_encoder ([`CLIPTextModelWithProjection`]):
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py
index 65ba22cd6e..32f6b4415f 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py
@@ -119,7 +119,7 @@ class KandinskyV22CombinedPipeline(DiffusionPipeline):
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
         prior_prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         prior_image_encoder ([`CLIPVisionModelWithProjection`]):
             Frozen image-encoder.
         prior_text_encoder ([`CLIPTextModelWithProjection`]):
@@ -346,7 +346,7 @@ class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline):
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
         prior_prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         prior_image_encoder ([`CLIPVisionModelWithProjection`]):
             Frozen image-encoder.
         prior_text_encoder ([`CLIPTextModelWithProjection`]):
@@ -594,7 +594,7 @@ class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline):
         movq ([`VQModel`]):
             MoVQ Decoder to generate the image from the latents.
         prior_prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         prior_image_encoder ([`CLIPVisionModelWithProjection`]):
             Frozen image-encoder.
         prior_text_encoder ([`CLIPTextModelWithProjection`]):
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py
index 83427c68f2..1455e20ab5 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py
@@ -90,7 +90,7 @@ class KandinskyV22PriorPipeline(DiffusionPipeline):
 
     Args:
         prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         image_encoder ([`CLIPVisionModelWithProjection`]):
             Frozen image-encoder.
         text_encoder ([`CLIPTextModelWithProjection`]):
diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py
index bef70821c6..26442735e8 100644
--- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py
+++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py
@@ -108,7 +108,7 @@ class KandinskyV22PriorEmb2EmbPipeline(DiffusionPipeline):
 
     Args:
         prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         image_encoder ([`CLIPVisionModelWithProjection`]):
             Frozen image-encoder.
         text_encoder ([`CLIPTextModelWithProjection`]):
diff --git a/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py b/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py
index 641ec56a15..02e32633ce 100644
--- a/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py
+++ b/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py
@@ -86,7 +86,7 @@ class ShapEImg2ImgPipeline(DiffusionPipeline):
 
     Args:
         prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         image_encoder ([`~transformers.CLIPVisionModel`]):
             Frozen image-encoder.
         image_processor ([`~transformers.CLIPImageProcessor`]):
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py
index c410acbeda..72b438cd33 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py
@@ -700,8 +700,8 @@ class StableDiffusionDepth2ImgPipeline(DiffusionPipeline, TextualInversionLoader
         >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
         >>> init_image = Image.open(requests.get(url, stream=True).raw)
         >>> prompt = "two tigers"
-        >>> n_propmt = "bad, deformed, ugly, bad anotomy"
-        >>> image = pipe(prompt=prompt, image=init_image, negative_prompt=n_propmt, strength=0.7).images[0]
+        >>> n_prompt = "bad, deformed, ugly, bad anatomy"
+        >>> image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0]
         ```
 
         Returns:
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
index cbb6ed4fa1..01c2eaea06 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
@@ -194,7 +194,7 @@ class StableDiffusionInstructPix2PixPipeline(
                 A higher guidance scale value encourages the model to generate images closely linked to the text
                 `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
             image_guidance_scale (`float`, *optional*, defaults to 1.5):
-                Push the generated image towards the inital `image`. Image guidance scale is enabled by setting
+                Push the generated image towards the initial `image`. Image guidance scale is enabled by setting
                 `image_guidance_scale > 1`. Higher image guidance scale encourages generated images that are closely
                 linked to the source `image`, usually at the expense of lower image quality. This pipeline requires a
                 value of at least `1`.
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py
index c62e0f4ec5..32b43a8e7f 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py
@@ -76,7 +76,7 @@ class StableUnCLIPPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInver
         prior_text_encoder ([`CLIPTextModelWithProjection`]):
             Frozen [`CLIPTextModelWithProjection`] text-encoder.
         prior ([`PriorTransformer`]):
-            The canonincal unCLIP prior to approximate the image embedding from the text embedding.
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
         prior_scheduler ([`KarrasDiffusionSchedulers`]):
             Scheduler used in the prior denoising process.
         image_normalizer ([`StableUnCLIPImageNormalizer`]):
diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py
index 51e413d4b5..026854d5c4 100644
--- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py
+++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py
@@ -659,7 +659,7 @@ class StableDiffusionXLInstructPix2PixPipeline(
                 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                 usually at the expense of lower image quality.
             image_guidance_scale (`float`, *optional*, defaults to 1.5):
-                Image guidance scale is to push the generated image towards the inital image `image`. Image guidance
+                Image guidance scale is to push the generated image towards the initial image `image`. Image guidance
                 scale is enabled by setting `image_guidance_scale > 1`. Higher image guidance scale encourages to
                 generate images that are closely linked to the source image `image`, usually at the expense of lower
                 image quality. This pipeline requires a value of at least `1`.
diff --git a/src/diffusers/schedulers/scheduling_consistency_models.py b/src/diffusers/schedulers/scheduling_consistency_models.py
index 316d657e8d..3abf52f0af 100644
--- a/src/diffusers/schedulers/scheduling_consistency_models.py
+++ b/src/diffusers/schedulers/scheduling_consistency_models.py
@@ -438,7 +438,7 @@ class CMStochasticIterativeScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_deis_multistep.py b/src/diffusers/schedulers/scheduling_deis_multistep.py
index 27213c0af1..f25fbf029b 100644
--- a/src/diffusers/schedulers/scheduling_deis_multistep.py
+++ b/src/diffusers/schedulers/scheduling_deis_multistep.py
@@ -775,7 +775,7 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py b/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
index 1c8ec1090b..5b452bddba 100644
--- a/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
+++ b/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
@@ -1018,7 +1018,7 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_dpmsolver_sde.py b/src/diffusers/schedulers/scheduling_dpmsolver_sde.py
index 22aeba5750..54455b0f2e 100644
--- a/src/diffusers/schedulers/scheduling_dpmsolver_sde.py
+++ b/src/diffusers/schedulers/scheduling_dpmsolver_sde.py
@@ -547,7 +547,7 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py b/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py
index 3113d61a94..215d94a786 100644
--- a/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py
+++ b/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py
@@ -968,7 +968,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py b/src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py
index f490da07ab..9422d57cff 100644
--- a/src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py
+++ b/src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py
@@ -673,7 +673,7 @@ class EDMDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_edm_euler.py b/src/diffusers/schedulers/scheduling_edm_euler.py
index 7f0ef90294..bad6aeff8b 100644
--- a/src/diffusers/schedulers/scheduling_edm_euler.py
+++ b/src/diffusers/schedulers/scheduling_edm_euler.py
@@ -371,7 +371,7 @@ class EDMEulerScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py b/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py
index 81009b4fe7..6631ef63a8 100644
--- a/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py
+++ b/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py
@@ -471,7 +471,7 @@ class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_euler_discrete.py b/src/diffusers/schedulers/scheduling_euler_discrete.py
index 62d0caa4e2..1e3252c0bd 100644
--- a/src/diffusers/schedulers/scheduling_euler_discrete.py
+++ b/src/diffusers/schedulers/scheduling_euler_discrete.py
@@ -566,7 +566,7 @@ class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_heun_discrete.py b/src/diffusers/schedulers/scheduling_heun_discrete.py
index 14ab01da74..fb8f28f73d 100644
--- a/src/diffusers/schedulers/scheduling_heun_discrete.py
+++ b/src/diffusers/schedulers/scheduling_heun_discrete.py
@@ -472,7 +472,7 @@ class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py b/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py
index af50fcf54f..fd2b94759f 100644
--- a/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py
+++ b/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py
@@ -498,7 +498,7 @@ class KDPM2AncestralDiscreteScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py b/src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py
index 888ba311a9..57a1af6b79 100644
--- a/src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py
+++ b/src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py
@@ -473,7 +473,7 @@ class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_lms_discrete.py b/src/diffusers/schedulers/scheduling_lms_discrete.py
index c8abade3e5..61a91783c2 100644
--- a/src/diffusers/schedulers/scheduling_lms_discrete.py
+++ b/src/diffusers/schedulers/scheduling_lms_discrete.py
@@ -465,7 +465,7 @@ class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
diff --git a/src/diffusers/schedulers/scheduling_unipc_multistep.py b/src/diffusers/schedulers/scheduling_unipc_multistep.py
index a844bcaec3..3c0955ce4e 100644
--- a/src/diffusers/schedulers/scheduling_unipc_multistep.py
+++ b/src/diffusers/schedulers/scheduling_unipc_multistep.py
@@ -869,7 +869,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
             # add_noise is called after first denoising step (for inpainting)
             step_indices = [self.step_index] * timesteps.shape[0]
         else:
-            # add noise is called bevore first denoising step to create inital latent(img2img)
+            # add noise is called before first denoising step to create initial latent(img2img)
             step_indices = [self.begin_index] * timesteps.shape[0]
 
         sigma = sigmas[step_indices].flatten()
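
For reference, the memory tricks touched by the `svd.md` hunks combine as follows. This is a minimal sketch rather than the guide verbatim: the checkpoint name matches the SVD-XT model the doc links to, but the input image URL and output filename are illustrative placeholders.

```py
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()      # keep only the active sub-model on the GPU
pipe.unet.enable_forward_chunking()  # trade speed for a smaller attention footprint

image = load_image("https://example.com/input.png")  # illustrative URL
image = image.resize((1024, 576))

generator = torch.manual_seed(42)
# decode_chunk_size=2 decodes the latents two frames at a time to cap VRAM usage,
# matching the `decode_chunk_size=2, num_frames=25` call shown in the hunk above
frames = pipe(image, decode_chunk_size=2, generator=generator, num_frames=25).frames[0]
export_to_video(frames, "generated.mp4", fps=7)
```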
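The `image_guidance_scale` docstrings corrected in the two InstructPix2Pix pipelines are easier to read next to a call that sets both scales. A minimal sketch, assuming the public `timbrooks/instruct-pix2pix` checkpoint; the image URL and edit prompt are illustrative:

```py
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

image = load_image("https://example.com/photo.png")  # illustrative URL

# guidance_scale pulls the result toward the text instruction, while
# image_guidance_scale (> 1) pulls it toward the initial image, as the
# corrected docstring describes.
edited = pipe(
    "turn the sky into a sunset",
    image=image,
    guidance_scale=7.5,
    image_guidance_scale=1.5,
).images[0]
edited.save("edited.png")
```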
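The comment fixed across the fourteen schedulers distinguishes two call sites for `add_noise`: after the first denoising step (inpainting, where `step_index` is already set) and before it (img2img, where a pipeline sets `begin_index` and then noises the initial latents). A minimal sketch of the img2img branch; the shapes and strength handling are illustrative, not taken from any pipeline:

```py
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler()
num_steps, strength = 30, 0.7
scheduler.set_timesteps(num_steps)

# img2img skips the first (1 - strength) of the schedule
start = int(num_steps * (1 - strength))
scheduler.set_begin_index(start)  # pipelines set this before the denoising loop

init_latents = torch.randn(1, 4, 64, 64)  # stand-in for an encoded input image
noise = torch.randn_like(init_latents)

# Called before the first denoising step, so add_noise takes the
# `begin_index` branch shown in the hunks above.
noisy = scheduler.add_noise(init_latents, noise, scheduler.timesteps[start : start + 1])
```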