
fix an issue where IPEX occupies too much memory; it will not impact performance (#5625)

* fix an issue where IPEX occupies too much memory; it will not impact performance

* make style

---------

Co-authored-by: root <jun.chen@intel.com>
Co-authored-by: Meng Guoqing <guoqing.meng@intel.com>
Author: ginjia
Date: 2023-11-20 19:43:29 +08:00
Committed by: GitHub
Parent: fda297703f
Commit: 4abbbff618


@@ -252,9 +252,7 @@ class StableDiffusionIPEXPipeline(DiffusionPipeline, TextualInversionLoaderMixin
         # optimize with ipex
         if dtype == torch.bfloat16:
-            self.unet = ipex.optimize(
-                self.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=unet_input_example
-            )
+            self.unet = ipex.optimize(self.unet.eval(), dtype=torch.bfloat16, inplace=True)
             self.vae.decoder = ipex.optimize(self.vae.decoder.eval(), dtype=torch.bfloat16, inplace=True)
             self.text_encoder = ipex.optimize(self.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
             if self.safety_checker is not None:
@@ -264,8 +262,6 @@ class StableDiffusionIPEXPipeline(DiffusionPipeline, TextualInversionLoaderMixin
                 self.unet.eval(),
                 dtype=torch.float32,
                 inplace=True,
-                sample_input=unet_input_example,
-                level="O1",
                 weights_prepack=True,
                 auto_kernel_selection=False,
             )
@@ -273,7 +269,6 @@ class StableDiffusionIPEXPipeline(DiffusionPipeline, TextualInversionLoaderMixin
                 self.vae.decoder.eval(),
                 dtype=torch.float32,
                 inplace=True,
-                level="O1",
                 weights_prepack=True,
                 auto_kernel_selection=False,
             )
@@ -281,7 +276,6 @@ class StableDiffusionIPEXPipeline(DiffusionPipeline, TextualInversionLoaderMixin
                 self.text_encoder.eval(),
                 dtype=torch.float32,
                 inplace=True,
-                level="O1",
                 weights_prepack=True,
                 auto_kernel_selection=False,
             )
@@ -290,7 +284,6 @@ class StableDiffusionIPEXPipeline(DiffusionPipeline, TextualInversionLoaderMixin
                 self.safety_checker.eval(),
                 dtype=torch.float32,
                 inplace=True,
-                level="O1",
                 weights_prepack=True,
                 auto_kernel_selection=False,
             )
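
For context, a minimal, self-contained sketch of the API change this diff makes, assuming intel_extension_for_pytorch is installed. `Net` is a hypothetical stand-in module; in the pipeline the same ipex.optimize calls are applied to the UNet, VAE decoder, text encoder, and safety checker.

# Sketch only: mirrors the post-change ipex.optimize arguments; `Net` is illustrative,
# not the pipeline's real UNet.
import torch
import intel_extension_for_pytorch as ipex

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(64, 64)

    def forward(self, x):
        return self.proj(x)

model = Net()

# Before this commit: an example input was passed so IPEX could infer shapes for
# optimizations such as weight prepacking.
# model = ipex.optimize(model.eval(), dtype=torch.bfloat16, inplace=True,
#                       sample_input=(torch.randn(1, 64),))

# After this commit: no sample_input, which per the commit message avoids the
# excessive memory use without impacting performance.
model = ipex.optimize(model.eval(), dtype=torch.bfloat16, inplace=True)

# float32 path after the change: the explicit level="O1" argument is dropped, so
# IPEX uses its default optimization level; weight prepacking stays enabled.
model_fp32 = ipex.optimize(
    Net().eval(),
    dtype=torch.float32,
    inplace=True,
    weights_prepack=True,
    auto_kernel_selection=False,
)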