Fix typos
@@ -37,7 +37,7 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers.m

## Inference with under 8GB GPU VRAM

Run the [`PixArtAlphaPipeline`] with under 8GB GPU VRAM by loading the text encoder in 8-bit precision. Let's walk through a full-fledged example.

First, install the [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) library:
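
A minimal sketch of the setup this section describes, assuming the `transformers` + `bitsandbytes` 8-bit loading path and the `PixArt-alpha/PixArt-XL-2-1024-MS` checkpoint (both are assumptions, not taken from the diff):

```python
# pip install -U bitsandbytes transformers accelerate
from transformers import T5EncoderModel

# Load only the T5 text encoder in 8-bit so prompt encoding fits in limited VRAM.
text_encoder = T5EncoderModel.from_pretrained(
    "PixArt-alpha/PixArt-XL-2-1024-MS",  # assumed checkpoint ID
    subfolder="text_encoder",
    load_in_8bit=True,
    device_map="auto",
)
```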
@@ -75,10 +75,10 @@ with torch.no_grad():
    prompt_embeds, prompt_attention_mask, negative_embeds, negative_prompt_attention_mask = pipe.encode_prompt(prompt)
```

-Since text embeddings have been computed, remove the `text_encoder` and `pipe` from the memory, and free up som GPU VRAM:
+Since text embeddings have been computed, remove the `text_encoder` and `pipe` from the memory, and free up some GPU VRAM:

```python
import gc

def flush():
    gc.collect()
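
# Added sketch (not part of the diff): the cleanup step this section describes.
# It assumes `text_encoder` and `pipe` are the objects created above, and that
# flush() also clears the CUDA cache with torch.cuda.empty_cache() after gc.collect().
del text_encoder
del pipe
flush()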
@@ -99,7 +99,7 @@ pipe = PixArtAlphaPipeline.from_pretrained(
).to("cuda")

latents = pipe(
    negative_prompt=None,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    prompt_attention_mask=prompt_attention_mask,
@@ -146,4 +146,3 @@ While loading the `text_encoder`, you set `load_in_8bit` to `True`. You could al
[[autodoc]] PixArtAlphaPipeline
  - all
  - __call__

@@ -39,7 +39,7 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers)

## Inference with under 8GB GPU VRAM

Run the [`PixArtSigmaPipeline`] with under 8GB GPU VRAM by loading the text encoder in 8-bit precision. Let's walk through a full-fledged example.

First, install the [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) library:
@@ -59,7 +59,6 @@ text_encoder = T5EncoderModel.from_pretrained(
    subfolder="text_encoder",
    load_in_8bit=True,
    device_map="auto",

)
pipe = PixArtSigmaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
@@ -77,10 +76,10 @@ with torch.no_grad():
    prompt_embeds, prompt_attention_mask, negative_embeds, negative_prompt_attention_mask = pipe.encode_prompt(prompt)
```

-Since text embeddings have been computed, remove the `text_encoder` and `pipe` from the memory, and free up som GPU VRAM:
+Since text embeddings have been computed, remove the `text_encoder` and `pipe` from the memory, and free up some GPU VRAM:

```python
import gc

def flush():
    gc.collect()
@@ -101,7 +100,7 @@ pipe = PixArtSigmaPipeline.from_pretrained(
).to("cuda")

latents = pipe(
    negative_prompt=None,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    prompt_attention_mask=prompt_attention_mask,
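
Once the transformer has produced `latents`, they still have to be decoded into an image. A sketch of that last step, assuming the call above requested `output_type="latent"` and that `pipe` still exposes the VAE and image processor (assumptions, not text from the diff):

```python
import torch

with torch.no_grad():
    image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=False)[0]
image = pipe.image_processor.postprocess(image, output_type="pil")[0]
image.save("output.png")
```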
@@ -148,4 +147,3 @@ While loading the `text_encoder`, you set `load_in_8bit` to `True`. You could al
[[autodoc]] PixArtSigmaPipeline
  - all
  - __call__

@@ -138,15 +138,15 @@ Because Marigold's latent space is compatible with the base Stable Diffusion, it
```diff
import diffusers
import torch

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

+ pipe.vae = diffusers.AutoencoderTiny.from_pretrained(
+     "madebyollin/taesd", torch_dtype=torch.float16
+ ).cuda()

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(image)
```
@@ -156,13 +156,13 @@ As suggested in [Optimizations](torch2.0), adding `torch.compile` may squeeze ex
```diff
import diffusers
import torch

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(image)
```
@@ -208,7 +208,7 @@ model_paper_kwargs = {
    diffusers.schedulers.LCMScheduler: {
        "num_inference_steps": 4,
        "ensemble_size": 5,
    },
}

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
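
A sketch of how a per-scheduler kwargs table like the one above is typically consumed; `pipe` is assumed to be the Marigold pipeline configured earlier:

```python
# Pick the paper-recommended settings for whichever scheduler the pipeline is using.
pipe_kwargs = model_paper_kwargs[type(pipe.scheduler)]
depth = pipe(image, **pipe_kwargs)
```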
@@ -261,7 +261,7 @@ model_paper_kwargs = {
    diffusers.schedulers.LCMScheduler: {
        "num_inference_steps": 4,
        "ensemble_size": 10,
    },
}

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
@@ -415,7 +415,7 @@ image = diffusers.utils.load_image(
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-lcm-v1-0", torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+).to(device)

depth_image = pipe(image, generator=generator).prediction
depth_image = pipe.image_processor.visualize_depth(depth_image, color_map="binary")
@@ -423,10 +423,10 @@ depth_image[0].save("motorcycle_controlnet_depth.png")

controlnet = diffusers.ControlNetModel.from_pretrained(
    "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+).to(device)
pipe = diffusers.StableDiffusionXLControlNetPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0", torch_dtype=torch.float16, variant="fp16", controlnet=controlnet
-).to("cuda")
+).to(device)
pipe.scheduler = diffusers.DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True)

controlnet_out = pipe(
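
A hypothetical completion of that call, to make the excerpt easier to follow; the prompt, conditioning scale, and output handling are assumptions, not text from the commit:

```python
controlnet_out = pipe(
    prompt="high quality photo of a motorcycle, sharp focus",  # assumed prompt
    negative_prompt="blurry, low quality",                      # assumed
    image=depth_image[0],                 # the visualized Marigold depth map from above
    controlnet_conditioning_scale=0.7,    # assumed value
    generator=generator,
).images[0]
controlnet_out.save("motorcycle_controlnet_out.png")
```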
@@ -692,7 +692,7 @@ def print_tree_deps_of(module, all_edges=None):

def init_test_examples_dependencies() -> Tuple[Dict[str, List[str]], List[str]]:
    """
-    The test examples do not import from the examples (which are just scripts, not modules) so we need som extra
+    The test examples do not import from the examples (which are just scripts, not modules) so we need some extra
    care initializing the dependency map, which is the goal of this function. It initializes the dependency map for
    example files by linking each example to the example test file for the example framework.
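
For intuition, an illustrative standalone sketch of the mapping this docstring describes (not the repository's actual implementation):

```python
from pathlib import Path
from typing import Dict, List, Tuple

def example_dependency_sketch(repo_root: str = ".") -> Tuple[Dict[str, List[str]], List[str]]:
    """Link every example script to the test file of its framework folder."""
    deps: Dict[str, List[str]] = {}
    all_examples: List[str] = []
    for test_file in Path(repo_root, "examples").glob("**/test_*.py"):
        # Scripts in the same framework folder count as dependencies of that test file:
        # touching any of them should re-run the framework's example tests.
        scripts = [str(p) for p in test_file.parent.glob("**/*.py") if p != test_file]
        deps[str(test_file)] = scripts
        all_examples.extend(scripts + [str(test_file)])
    return deps, all_examples
```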