diff --git a/docs/source/en/modular_diffusers/custom_blocks.md b/docs/source/en/modular_diffusers/custom_blocks.md
index 1c31158226..6ef8db613f 100644
--- a/docs/source/en/modular_diffusers/custom_blocks.md
+++ b/docs/source/en/modular_diffusers/custom_blocks.md
@@ -140,7 +140,7 @@ class Florence2ImageAnnotatorBlock(ModularPipelineBlocks):
                 type_hint=str,
                 required=True,
                 default="mask_image",
-                description="""Output type from annotation predictions. Availabe options are
+                description="""Output type from annotation predictions. Available options are
                 mask_image:
                     -black and white mask image for the given image based on the task type
                 mask_overlay:
@@ -256,7 +256,7 @@ class Florence2ImageAnnotatorBlock(ModularPipelineBlocks):
                 type_hint=str,
                 required=True,
                 default="mask_image",
-                description="""Output type from annotation predictions. Availabe options are
+                description="""Output type from annotation predictions. Available options are
                 mask_image:
                     -black and white mask image for the given image based on the task type
                 mask_overlay:
diff --git a/docs/source/en/modular_diffusers/loop_sequential_pipeline_blocks.md b/docs/source/en/modular_diffusers/loop_sequential_pipeline_blocks.md
index a80309de19..74a8689227 100644
--- a/docs/source/en/modular_diffusers/loop_sequential_pipeline_blocks.md
+++ b/docs/source/en/modular_diffusers/loop_sequential_pipeline_blocks.md
@@ -53,7 +53,7 @@ The loop wrapper can pass additional arguments, like current iteration index, to
 
 A loop block is a [`~modular_pipelines.ModularPipelineBlocks`], but the `__call__` method behaves differently.
 
-- It recieves the iteration variable from the loop wrapper.
+- It receives the iteration variable from the loop wrapper.
 - It works directly with the [`~modular_pipelines.BlockState`] instead of the [`~modular_pipelines.PipelineState`].
 - It doesn't require retrieving or updating the [`~modular_pipelines.BlockState`].
 
diff --git a/examples/cogvideo/train_cogvideox_image_to_video_lora.py b/examples/cogvideo/train_cogvideox_image_to_video_lora.py
index 113d9b5739..001934298a 100644
--- a/examples/cogvideo/train_cogvideox_image_to_video_lora.py
+++ b/examples/cogvideo/train_cogvideox_image_to_video_lora.py
@@ -149,13 +149,13 @@ def get_args():
         "--validation_prompt",
         type=str,
         default=None,
-        help="One or more prompt(s) that is used during validation to verify that the model is learning. Multiple validation prompts should be separated by the '--validation_prompt_seperator' string.",
+        help="One or more prompt(s) that is used during validation to verify that the model is learning. Multiple validation prompts should be separated by the '--validation_prompt_separator' string.",
     )
     parser.add_argument(
         "--validation_images",
         type=str,
         default=None,
-        help="One or more image path(s) that is used during validation to verify that the model is learning. Multiple validation paths should be separated by the '--validation_prompt_seperator' string. These should correspond to the order of the validation prompts.",
+        help="One or more image path(s) that is used during validation to verify that the model is learning. Multiple validation paths should be separated by the '--validation_prompt_separator' string. These should correspond to the order of the validation prompts.",
     )
     parser.add_argument(
         "--validation_prompt_separator",
diff --git a/examples/cogvideo/train_cogvideox_lora.py b/examples/cogvideo/train_cogvideox_lora.py
index bcafe4ecf5..f6f2dc83a3 100644
--- a/examples/cogvideo/train_cogvideox_lora.py
+++ b/examples/cogvideo/train_cogvideox_lora.py
@@ -140,7 +140,7 @@ def get_args():
         "--validation_prompt",
         type=str,
         default=None,
-        help="One or more prompt(s) that is used during validation to verify that the model is learning. Multiple validation prompts should be separated by the '--validation_prompt_seperator' string.",
+        help="One or more prompt(s) that is used during validation to verify that the model is learning. Multiple validation prompts should be separated by the '--validation_prompt_separator' string.",
     )
     parser.add_argument(
         "--validation_prompt_separator",
diff --git a/examples/research_projects/onnxruntime/text_to_image/README.md b/examples/research_projects/onnxruntime/text_to_image/README.md
index f398f08166..1d688471ba 100644
--- a/examples/research_projects/onnxruntime/text_to_image/README.md
+++ b/examples/research_projects/onnxruntime/text_to_image/README.md
@@ -4,7 +4,7 @@ The `train_text_to_image.py` script shows how to fine-tune stable diffusion mode
 
 ___Note___:
 
-___This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparamters to get the best result on your dataset.___
+___This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset.___
 
 ## Running locally with PyTorch
 
diff --git a/examples/research_projects/sdxl_flax/sdxl_single.py b/examples/research_projects/sdxl_flax/sdxl_single.py
index 5b9b862d99..c3cbf6ca24 100644
--- a/examples/research_projects/sdxl_flax/sdxl_single.py
+++ b/examples/research_projects/sdxl_flax/sdxl_single.py
@@ -18,7 +18,7 @@ cc.initialize_cache("/tmp/sdxl_cache")
 NUM_DEVICES = jax.device_count()
 
 # 1. Let's start by downloading the model and loading it into our pipeline class
-# Adhering to JAX's functional approach, the model's parameters are returned seperatetely and
+# Adhering to JAX's functional approach, the model's parameters are returned separately and
 # will have to be passed to the pipeline during inference
 pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
     "stabilityai/stable-diffusion-xl-base-1.0", revision="refs/pr/95", split_head_dim=True
diff --git a/src/diffusers/modular_pipelines/qwenimage/before_denoise.py b/src/diffusers/modular_pipelines/qwenimage/before_denoise.py
index 0c66d6ea33..e14164229c 100644
--- a/src/diffusers/modular_pipelines/qwenimage/before_denoise.py
+++ b/src/diffusers/modular_pipelines/qwenimage/before_denoise.py
@@ -455,7 +455,7 @@ class QwenImageSetTimestepsStep(ModularPipelineBlocks):
 
     @property
     def description(self) -> str:
-        return "Step that sets the the scheduler's timesteps for text-to-image generation. Should be run after prepare latents step."
+        return "Step that sets the scheduler's timesteps for text-to-image generation. Should be run after prepare latents step."
 
     @property
     def expected_components(self) -> List[ComponentSpec]:
@@ -579,7 +579,7 @@ class QwenImageSetTimestepsWithStrengthStep(ModularPipelineBlocks):
 
     @property
     def description(self) -> str:
-        return "Step that sets the the scheduler's timesteps for image-to-image generation, and inpainting. Should be run after prepare latents step."
+        return "Step that sets the scheduler's timesteps for image-to-image generation, and inpainting. Should be run after prepare latents step."
 
     @property
     def expected_components(self) -> List[ComponentSpec]:
diff --git a/src/diffusers/pipelines/lucy/pipeline_lucy_edit.py b/src/diffusers/pipelines/lucy/pipeline_lucy_edit.py
index 69f69d5768..8065a17b78 100644
--- a/src/diffusers/pipelines/lucy/pipeline_lucy_edit.py
+++ b/src/diffusers/pipelines/lucy/pipeline_lucy_edit.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 #
 # Modifications by Decart AI Team:
-# - Based on pipeline_wan.py, but with supports recieving a condition video appended to the channel dimension.
+# - Based on pipeline_wan.py, but with supports receiving a condition video appended to the channel dimension.
 
 import html
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union