From 57f7d2593427bd2cbf7d15d32844cfc5f7717d3f Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 12 Jan 2023 11:23:52 +0100
Subject: [PATCH] [CPU offload] correct cpu offload (#1968)

* [CPU offload] correct cpu offload

* [CPU offload] correct cpu offload

* finish

* finish

* Update docs/source/en/optimization/fp16.mdx

Co-authored-by: Pedro Cuenca

* Update src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py

Co-authored-by: Pedro Cuenca

Co-authored-by: Pedro Cuenca
---
 docs/source/en/optimization/fp16.mdx                | 12 ++++++------
 .../alt_diffusion/pipeline_alt_diffusion.py         |  7 ++-----
 .../alt_diffusion/pipeline_alt_diffusion_img2img.py |  7 ++-----
 .../stable_diffusion/pipeline_cycle_diffusion.py    |  7 ++-----
 .../stable_diffusion/pipeline_stable_diffusion.py   |  7 ++-----
 .../pipeline_stable_diffusion_img2img.py            |  7 ++-----
 .../pipeline_stable_diffusion_inpaint.py            |  7 ++-----
 .../pipeline_stable_diffusion_inpaint_legacy.py     |  7 ++-----
 .../pipeline_stable_diffusion_k_diffusion.py        |  7 ++-----
 9 files changed, 22 insertions(+), 46 deletions(-)

diff --git a/docs/source/en/optimization/fp16.mdx b/docs/source/en/optimization/fp16.mdx
index e0c3d99e84..c171425753 100644
--- a/docs/source/en/optimization/fp16.mdx
+++ b/docs/source/en/optimization/fp16.mdx
@@ -149,7 +149,7 @@ You may see a small performance boost in VAE decode on multi-image batches. Ther
 
 ## Offloading to CPU with accelerate for memory savings
 
-For additional memory savings, you can offload the weights to CPU and load them to GPU when performing the forward pass.
+For additional memory savings, you can offload the weights to CPU and only load them to GPU when performing the forward pass.
 
 To perform CPU offloading, all you have to do is invoke [`~StableDiffusionPipeline.enable_sequential_cpu_offload`]:
 
@@ -162,16 +162,15 @@ pipe = StableDiffusionPipeline.from_pretrained(
     torch_dtype=torch.float16,
 )
-pipe = pipe.to("cuda")
 
 prompt = "a photo of an astronaut riding a horse on mars"
 pipe.enable_sequential_cpu_offload()
 image = pipe(prompt).images[0]
 ```
 
-And you can get the memory consumption to < 2GB.
+And you can get the memory consumption to < 3GB.
 
-If is also possible to chain it with attention slicing for minimal memory consumption, running it in as little as < 800mb of GPU vRAM:
+It is also possible to chain it with attention slicing for minimal memory consumption (< 2GB).
 
 ```Python
 import torch
 
@@ -182,7 +181,6 @@ pipe = StableDiffusionPipeline.from_pretrained(
     torch_dtype=torch.float16,
 )
-pipe = pipe.to("cuda")
 
 prompt = "a photo of an astronaut riding a horse on mars"
 pipe.enable_sequential_cpu_offload()
 
@@ -191,6 +189,8 @@ pipe.enable_attention_slicing(1)
 image = pipe(prompt).images[0]
 ```
 
+**Note**: When using `enable_sequential_cpu_offload()`, it is important to **not** move the pipeline to CUDA beforehand or else the gain in memory consumption will only be minimal. See [this issue](https://github.com/huggingface/diffusers/issues/1934) for more information.
+
 ## Using Channels Last memory format
 
 Channels last memory format is an alternative way of ordering NCHW tensors in memory preserving dimensions ordering. Channels last tensors ordered in such a way that channels become the densest dimension (aka storing images pixel-per-pixel). Since not all operators currently support channels last format it may result in a worst performance, so it's better to try it and see if it works for your model.
@@ -357,4 +357,4 @@ with torch.inference_mode():
 
 # optional: You can disable it via
 # pipe.disable_xformers_memory_efficient_attention()
-```
\ No newline at end of file
+```
diff --git a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py
index 4d8678b468..e9f61b4e48 100644
--- a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py
+++ b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py
@@ -211,13 +211,10 @@ class AltDiffusionPipeline(DiffusionPipeline):
         device = torch.device(f"cuda:{gpu_id}")
 
         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-            if cpu_offloaded_model is not None:
-                cpu_offload(cpu_offloaded_model, device)
+            cpu_offload(cpu_offloaded_model, device)
 
         if self.safety_checker is not None:
-            # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
-            # fix by only offloading self.safety_checker for now
-            cpu_offload(self.safety_checker.vision_model, device)
+            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
 
     @property
     def _execution_device(self):
diff --git a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
index 30e0bbec7e..4916219a6c 100644
--- a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
+++ b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
@@ -233,13 +233,10 @@ class AltDiffusionImg2ImgPipeline(DiffusionPipeline):
         device = torch.device(f"cuda:{gpu_id}")
 
         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-            if cpu_offloaded_model is not None:
-                cpu_offload(cpu_offloaded_model, device)
+            cpu_offload(cpu_offloaded_model, device)
 
         if self.safety_checker is not None:
-            # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
-            # fix by only offloading self.safety_checker for now
-            cpu_offload(self.safety_checker.vision_model, device)
+            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
 
     @property
     def _execution_device(self):
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py
index 5a9b9f6f4e..d47a6783a5 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py
@@ -236,13 +236,10 @@ class CycleDiffusionPipeline(DiffusionPipeline):
         device = torch.device(f"cuda:{gpu_id}")
 
         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-            if cpu_offloaded_model is not None:
-                cpu_offload(cpu_offloaded_model, device)
+            cpu_offload(cpu_offloaded_model, device)
 
         if self.safety_checker is not None:
-            # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
-            # fix by only offloading self.safety_checker for now
-            cpu_offload(self.safety_checker.vision_model, device)
+            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
 
     @property
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
index 2f9385e8a3..24447c6a67 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
@@ -208,13 +208,10 @@ class StableDiffusionPipeline(DiffusionPipeline):
         device = torch.device(f"cuda:{gpu_id}")
 
         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-            if cpu_offloaded_model is not None:
-                cpu_offload(cpu_offloaded_model, device)
+            cpu_offload(cpu_offloaded_model, device)
 
         if self.safety_checker is not None:
-            # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
-            # fix by only offloading self.safety_checker for now
-            cpu_offload(self.safety_checker.vision_model, device)
+            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
 
     @property
     def _execution_device(self):
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
index e2c8ecb829..4b82638be8 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
@@ -238,13 +238,10 @@ class StableDiffusionImg2ImgPipeline(DiffusionPipeline):
         device = torch.device(f"cuda:{gpu_id}")
 
         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-            if cpu_offloaded_model is not None:
-                cpu_offload(cpu_offloaded_model, device)
+            cpu_offload(cpu_offloaded_model, device)
 
         if self.safety_checker is not None:
-            # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
-            # fix by only offloading self.safety_checker for now
-            cpu_offload(self.safety_checker.vision_model, device)
+            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
 
     @property
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
index a6d03591be..1eb7109375 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
@@ -272,13 +272,10 @@ class StableDiffusionInpaintPipeline(DiffusionPipeline):
         device = torch.device(f"cuda:{gpu_id}")
 
         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-            if cpu_offloaded_model is not None:
-                cpu_offload(cpu_offloaded_model, device)
+            cpu_offload(cpu_offloaded_model, device)
 
         if self.safety_checker is not None:
-            # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
-            # fix by only offloading self.safety_checker for now
-            cpu_offload(self.safety_checker.vision_model, device)
+            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
 
     @property
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
index 3043ee2001..1f0be3ac0b 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
@@ -205,13 +205,10 @@ class StableDiffusionInpaintPipelineLegacy(DiffusionPipeline):
         device = torch.device(f"cuda:{gpu_id}")
 
         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-            if cpu_offloaded_model is not None:
-                cpu_offload(cpu_offloaded_model, device)
+            cpu_offload(cpu_offloaded_model, device)
 
         if self.safety_checker is not None:
-            # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
-            # fix by only offloading self.safety_checker for now
-            cpu_offload(self.safety_checker.vision_model, device)
+            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
 
     @property
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py
index 330d6ac0de..03bcea6f4f 100755
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py
@@ -137,13 +137,10 @@ class StableDiffusionKDiffusionPipeline(DiffusionPipeline):
         device = torch.device(f"cuda:{gpu_id}")
 
         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-            if cpu_offloaded_model is not None:
-                cpu_offload(cpu_offloaded_model, device)
+            cpu_offload(cpu_offloaded_model, device)
 
         if self.safety_checker is not None:
-            # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
-            # fix by only offloading self.safety_checker for now
-            cpu_offload(self.safety_checker.vision_model, device)
+            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
 
     @property
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
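
For reviewers, a minimal end-to-end sketch of the offloading pattern the updated docs describe, not part of the patch itself. It assumes a CUDA-capable machine with `diffusers` and `accelerate` installed; the checkpoint name is only illustrative:

```Python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # illustrative checkpoint
    torch_dtype=torch.float16,
)

# Deliberately no pipe.to("cuda"): the weights must start on CPU so that
# accelerate can move each submodule to the GPU only for its forward pass
# (see https://github.com/huggingface/diffusers/issues/1934).
pipe.enable_sequential_cpu_offload()

# Optional: chain with attention slicing for the lowest memory footprint.
pipe.enable_attention_slicing(1)

image = pipe("a photo of an astronaut riding a horse on mars").images[0]
```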
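The patch also switches the safety checker to `cpu_offload(..., offload_buffers=True)`, presumably so that its registered, non-parameter tensors are paged off the GPU together with the weights. Below is a hedged sketch of what that flag does, using a hypothetical toy module (not the real `StableDiffusionSafetyChecker`) that owns both a parameter and a registered buffer; it requires a CUDA device and `accelerate`'s public `cpu_offload` helper:

```Python
import torch
from torch import nn
from accelerate import cpu_offload

class ToySafetyChecker(nn.Module):
    """Hypothetical stand-in: holds a parameter *and* a registered buffer."""
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(8, 8)
        self.register_buffer("position_ids", torch.arange(8).unsqueeze(0))

    def forward(self, x):
        return self.proj(x + self.position_ids.float())

device = torch.device("cuda:0")
model = ToySafetyChecker()

# With offload_buffers=True, accelerate keeps the registered buffers on CPU
# alongside the parameters and streams both to `device` only while forward()
# runs; with the default (False), only the parameters are managed this way.
cpu_offload(model, execution_device=device, offload_buffers=True)

out = model(torch.randn(1, 8))  # tensors are moved to the GPU per call
```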