1
0
mirror of https://github.com/huggingface/diffusers.git synced 2026-01-29 07:22:12 +03:00

Enable telemetry for single-file loading when using GGUF.

This commit is contained in:
sayakpaul
2025-04-10 18:20:24 +05:30
parent ea5a6a8b7c
commit fe176857a2
2 changed files with 11 additions and 1 deletion

View File

@@ -22,6 +22,7 @@ from huggingface_hub.utils import validate_hf_hub_args
from typing_extensions import Self
from ..quantizers import DiffusersAutoQuantizer
from .. import __version__
from ..utils import deprecate, is_accelerate_available, logging
from .single_file_utils import (
SingleFileComponentError,
@@ -260,6 +261,11 @@ class FromOriginalModelMixin:
device = kwargs.pop("device", None)
disable_mmap = kwargs.pop("disable_mmap", False)
user_agent = {"diffusers": __version__, "file_type": "single_file", "framework": "pytorch"}
# In order to ensure popular quantization methods are supported. Can be disabled with `disable_telemetry`
if quantization_config is not None:
user_agent["quant"] = quantization_config.quant_method.value
if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype):
torch_dtype = torch.float32
logger.warning(
@@ -278,6 +284,7 @@ class FromOriginalModelMixin:
local_files_only=local_files_only,
revision=revision,
disable_mmap=disable_mmap,
user_agent=user_agent,
)
if quantization_config is not None:
hf_quantizer = DiffusersAutoQuantizer.from_config(quantization_config)

View File

@@ -405,13 +405,16 @@ def load_single_file_checkpoint(
local_files_only=None,
revision=None,
disable_mmap=False,
user_agent=None
):
if user_agent is None:
user_agent = {"file_type": "single_file", "framework": "pytorch"}
if os.path.isfile(pretrained_model_link_or_path):
pretrained_model_link_or_path = pretrained_model_link_or_path
else:
repo_id, weights_name = _extract_repo_id_and_weights_name(pretrained_model_link_or_path)
user_agent = {"file_type": "single_file", "framework": "pytorch"}
pretrained_model_link_or_path = _get_model_file(
repo_id,
weights_name=weights_name,