
Optimize test files by fixing CPU-offloading usage (#8409)

* Refactor code to remove unnecessary calls to `to(torch_device)`

* Refactor code to remove unnecessary calls to `to("cuda")`

* Update pipeline_stable_diffusion_diffedit.py
Tolga Cangöz
2024-06-06 22:51:26 +03:00
committed by GitHub
parent 151a56b80e
commit ec1aded12e
16 changed files with 1 addition and 21 deletions
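Every hunk below follows the same pattern: a test (or doc snippet) first moved the whole pipeline onto the accelerator with `to(torch_device)` / `to("cuda")` and then enabled CPU offloading, so the explicit device transfer was redundant. A minimal sketch of the fix, assuming a CUDA machine and the `CompVis/stable-diffusion-v1-4` checkpoint that several of the tests below use:

```python
# Minimal sketch of the pattern this commit applies (assumes a CUDA machine and
# the CompVis/stable-diffusion-v1-4 checkpoint used by several tests below).
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
)

# Previously the tests did this before enabling offloading:
#   pipe = pipe.to("cuda")
# That eagerly moves every weight to the GPU, which is what sequential CPU
# offloading is meant to avoid: the offload hooks keep submodules on the CPU
# and transfer each one to the GPU only for its own forward pass.
pipe.enable_sequential_cpu_offload()

image = pipe("an astronaut riding a horse", num_inference_steps=2).images[0]
```

The same reasoning applies to the hunks that call `enable_model_cpu_offload()` instead, which offloads at the granularity of whole components (text encoder, UNet, VAE) rather than individual submodules.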

View File

@@ -165,7 +165,7 @@ from PIL import Image
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
# load SD 1.5 based finetuned model
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16).to("cuda")
+pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
scheduler = DDIMScheduler.from_pretrained(
model_id,
subfolder="scheduler",

View File

@@ -1135,7 +1135,6 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
torch.cuda.reset_peak_memory_stats()
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -513,7 +513,6 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -705,7 +705,6 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16
)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
@@ -911,7 +910,6 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.TestCase):
"runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16
)
pipe.vae = vae
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -394,7 +394,6 @@ class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -462,7 +462,6 @@ class StableDiffusion2PipelineSlowTests(unittest.TestCase):
pipe = StableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -506,7 +506,6 @@ class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16
)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -261,7 +261,6 @@ class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
scheduler=pndm,
torch_dtype=torch.float16,
)
-pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -472,7 +472,6 @@ class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
model_id,
torch_dtype=torch.float16,
)
-pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -537,7 +537,6 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
prompt = "Andromeda galaxy in a bottle"
pipeline = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16)
-pipeline = pipeline.to(torch_device)
pipeline.enable_attention_slicing(1)
pipeline.enable_sequential_cpu_offload()

View File

@@ -809,7 +809,6 @@ class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase):
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
-pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_model_cpu_offload()
generator = torch.Generator(device="cpu").manual_seed(0)
@@ -942,7 +941,6 @@ class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase):
pipe = StableDiffusionAdapterPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", adapter=adapter, safety_checker=None
)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -263,7 +263,6 @@ class StableDiffusionImageVariationPipelineSlowTests(unittest.TestCase):
pipe = StableDiffusionImageVariationPipeline.from_pretrained(
"lambdalabs/sd-image-variations-diffusers", safety_checker=None, torch_dtype=torch.float16
)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -415,7 +415,6 @@ class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()

View File

@@ -206,7 +206,6 @@ class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
)
pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
-pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
@@ -228,7 +227,6 @@ class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()

View File

@@ -233,7 +233,6 @@ class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
)
-pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
@@ -261,7 +260,6 @@ class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
)
-pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
@@ -289,7 +287,6 @@ class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()

View File

@@ -500,7 +500,6 @@ class UnCLIPPipelineIntegrationTests(unittest.TestCase):
torch.cuda.reset_peak_memory_stats()
pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
-pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()