1
0
mirror of https://github.com/huggingface/diffusers.git synced 2026-01-27 17:22:53 +03:00

cpu offloading: multi GPU support (#1143)

multi GPU support
This commit is contained in:
dblunk88
2022-11-16 11:40:16 -05:00
committed by GitHub
parent 65d136e067
commit 09d0546ad0

View File

@@ -178,7 +178,7 @@ class StableDiffusionPipeline(DiffusionPipeline):
# set slice_size = `None` to disable `attention slicing`
self.enable_attention_slicing(None)
def enable_sequential_cpu_offload(self):
def enable_sequential_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
@@ -189,7 +189,7 @@ class StableDiffusionPipeline(DiffusionPipeline):
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
device = torch.device("cuda")
device = torch.device(f"cuda:{gpu_id}")
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None: