From 1edd0debaa3103e0dc230551c36e22ff60a56af4 Mon Sep 17 00:00:00 2001
From: YiYi Xu
Date: Thu, 3 Aug 2023 17:34:37 -1000
Subject: [PATCH] fix-format (#4458)

make style

Co-authored-by: yiyixuxu
---
 .../pipelines/kandinsky/pipeline_kandinsky_combined.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
index 804b5ded34..cb97c505b8 100644
--- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
+++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
@@ -203,10 +203,10 @@ class KandinskyCombinedPipeline(DiffusionPipeline):
 
     def enable_sequential_cpu_offload(self, gpu_id=0):
         r"""
-        Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗 Accelerate, significantly reducing memory usage. Models are moved to a
-        `torch.device('meta')` and loaded on a GPU only when their specific submodule's `forward` method is called.
-        Offloading happens on a submodule basis. Memory savings are higher than using
-        `enable_model_cpu_offload`, but performance is lower.
+        Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗
+        Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a
+        GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis.
+        Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower.
         """
         self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)
         self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)
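For reference, below is a minimal usage sketch of the method this patch touches; it is not part of the diff. It assumes the `kandinsky-community/kandinsky-2-1` checkpoint and a single CUDA device (GPU 0); adapt both to your setup.

import torch
from diffusers import KandinskyCombinedPipeline

# Load the combined pipeline in half precision to reduce memory use.
# The checkpoint id is an assumption; substitute the one you actually use.
pipe = KandinskyCombinedPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
)

# Forwards to both sub-pipelines, as the patched method does: weights stay
# on CPU / `torch.device('meta')` and each submodule is moved to GPU 0 only
# while its `forward` runs. Lowest VRAM usage, at the cost of speed.
pipe.enable_sequential_cpu_offload(gpu_id=0)

image = pipe(prompt="A photo of a red panda reading a book").images[0]
image.save("panda.png")

Note that with sequential offload enabled you should not call `pipe.to("cuda")` yourself; 🤗 Accelerate handles device placement per submodule.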