Mirror of https://github.com/huggingface/diffusers.git (synced 2026-01-27 17:22:53 +03:00)
Add generic support for Intel Gaudi accelerator (hpu device) (#11328)
* Add generic support for Intel Gaudi accelerator (hpu device)
* Add loggers for generic HPU support
* Refactor hpu support with is_hpu_available() logic
* Fix style for hpu support update
* Decouple soft HPU check from hard device validation to support HPU migration

---------

Signed-off-by: Daniel Socek <daniel.socek@intel.com>
Co-authored-by: Libin Tang <libin.tang@intel.com>
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
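With this change, a Gaudi device can be targeted through the regular `DiffusionPipeline.to()` API, as the diff below shows. A minimal usage sketch (the model id is illustrative; assumes a Gaudi host with the habana_frameworks PyTorch bridge installed):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.bfloat16
)

# Takes the new HPU path: sets PT_HPU_GPU_MIGRATION=1, imports
# habana_frameworks.torch, and hard-fails if no HPU device is reachable.
pipe = pipe.to("hpu")

# Migration can be opted out of per call via the new keyword argument:
# pipe = pipe.to("hpu", hpu_migration=False)

image = pipe("an astronaut riding a horse").images[0]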
@@ -58,6 +58,7 @@ from ..utils import (
     _is_valid_type,
     is_accelerate_available,
     is_accelerate_version,
+    is_hpu_available,
     is_torch_npu_available,
     is_torch_version,
     is_transformers_version,
@@ -450,6 +451,20 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
                 f"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. It is strongly recommended against doing so as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading."
             )
 
+        # Enable generic support for Intel Gaudi accelerator using GPU/HPU migration
+        if device_type == "hpu" and kwargs.pop("hpu_migration", True) and is_hpu_available():
+            os.environ["PT_HPU_GPU_MIGRATION"] = "1"
+            logger.debug("Environment variable set: PT_HPU_GPU_MIGRATION=1")
+
+            import habana_frameworks.torch  # noqa: F401
+
+            # HPU hardware check
+            if not (hasattr(torch, "hpu") and torch.hpu.is_available()):
+                raise ValueError("You are trying to call `.to('hpu')` but HPU device is unavailable.")
+
+            os.environ["PT_HPU_MAX_COMPOUND_OP_SIZE"] = "1"
+            logger.debug("Environment variable set: PT_HPU_MAX_COMPOUND_OP_SIZE=1")
+
         module_names, _ = self._get_signature_keys(self)
         modules = [getattr(self, n, None) for n in module_names]
         modules = [m for m in modules if isinstance(m, torch.nn.Module)]
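Two details of this hunk are worth noting: PT_HPU_GPU_MIGRATION=1 is exported before habana_frameworks.torch is imported (presumably so the bridge picks it up at import time and enables CUDA-to-HPU call migration), and the soft package check (is_hpu_available(), defined below) is deliberately separate from the hard device check (torch.hpu.is_available()), so merely having the packages installed is not enough to pass. A standalone sketch of the same detection order, using a hypothetical helper name:

import importlib.util
import os

import torch


def to_hpu(module: torch.nn.Module) -> torch.nn.Module:
    # Soft check: are the Habana packages importable on this host?
    if not all(importlib.util.find_spec(lib) for lib in ("habana_frameworks", "habana_frameworks.torch")):
        raise RuntimeError("habana_frameworks is not installed.")

    # Must be set before the bridge is imported so migration is picked up.
    os.environ["PT_HPU_GPU_MIGRATION"] = "1"
    import habana_frameworks.torch  # noqa: F401  (registers the "hpu" device)

    # Hard check: is an HPU actually present and usable?
    if not (hasattr(torch, "hpu") and torch.hpu.is_available()):
        raise ValueError("HPU device is unavailable.")
    return module.to("hpu")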
@@ -71,6 +71,7 @@ from .import_utils import (
     is_gguf_version,
     is_google_colab,
     is_hf_hub_version,
+    is_hpu_available,
     is_inflect_available,
     is_invisible_watermark_available,
     is_k_diffusion_available,
@@ -353,6 +353,10 @@ def is_timm_available():
     return _timm_available
 
 
+def is_hpu_available():
+    return all(importlib.util.find_spec(lib) for lib in ("habana_frameworks", "habana_frameworks.torch"))
+
+
 # docstyle-ignore
 FLAX_IMPORT_ERROR = """
 {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the
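The new is_hpu_available() helper only confirms that the Habana packages are importable; it says nothing about actual hardware. An illustrative sketch of the difference, on a host with the wheels installed but no Gaudi card attached:

from diffusers.utils import is_hpu_available

print(is_hpu_available())  # True: packages found, no device touched

import torch
import habana_frameworks.torch  # noqa: F401  (makes torch.hpu exist)

print(hasattr(torch, "hpu") and torch.hpu.is_available())  # False: no HPU reachable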