
[Local loading] Correct bug with local files only (#4318)

* [Local loading] Correct bug with local files only

* file not found error

* fix

* finish
Author: Patrick von Platen (committed by GitHub)
Date:   2023-07-27 16:16:46 +02:00
Parent: 5fd3dca5f3
Commit: 1926331eaf

8 changed files with 66 additions and 16 deletions
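
In short: before this fix, DiffusionPipeline.download() did not reliably honor local-files-only workflows — it resolved the pipeline class without consulting the cached model_index.json and downloaded connected pipelines without forwarding the caller's options. A minimal sketch of the flow this commit guarantees, mirroring the new test added below:

from diffusers import DiffusionPipeline

repo_id = "hf-internal-testing/tiny-stable-diffusion-torch"

# Populate the cache once while online; download() returns the snapshot folder.
DiffusionPipeline.download(repo_id)

# Later loads can stay fully local and never touch the Hub.
pipe = DiffusionPipeline.from_pretrained(repo_id, local_files_only=True)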

@@ -1474,11 +1474,25 @@ class DiffusionPipeline(ConfigMixin):
             user_agent=user_agent,
         )
 
-        if pipeline_class._load_connected_pipes:
+        # retrieve pipeline class from local file
+        cls_name = cls.load_config(os.path.join(cached_folder, "model_index.json")).get("_class_name", None)
+        pipeline_class = getattr(diffusers, cls_name, None)
+
+        if pipeline_class is not None and pipeline_class._load_connected_pipes:
             modelcard = ModelCard.load(os.path.join(cached_folder, "README.md"))
             connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], [])
             for connected_pipe_repo_id in connected_pipes:
-                DiffusionPipeline.download(connected_pipe_repo_id)
+                download_kwargs = {
+                    "cache_dir": cache_dir,
+                    "resume_download": resume_download,
+                    "force_download": force_download,
+                    "proxies": proxies,
+                    "local_files_only": local_files_only,
+                    "use_auth_token": use_auth_token,
+                    "variant": variant,
+                    "use_safetensors": use_safetensors,
+                }
+                DiffusionPipeline.download(connected_pipe_repo_id, **download_kwargs)
 
         return cached_folder
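
With download_kwargs forwarded, per-call options now apply to every connected pipeline repo listed in the model card, not just the main repo. A hedged sketch of the effect; `your-org/your-connected-pipeline` is a placeholder, not a real repo:

from diffusers import DiffusionPipeline

# cache_dir and local_files_only now also reach each connected pipeline;
# before this fix, connected repos were downloaded with default options.
cached_folder = DiffusionPipeline.download(
    "your-org/your-connected-pipeline",  # hypothetical repo id
    cache_dir="./my-cache",
    local_files_only=True,  # assumes the repo is already cached
)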

@@ -563,10 +563,10 @@ class SDXLLoraLoaderMixinTests(unittest.TestCase):
             projection_dim=32,
         )
         text_encoder = CLIPTextModel(text_encoder_config)
-        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
+        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
         text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
-        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
+        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
 
         unet_lora_attn_procs, unet_lora_layers = create_unet_lora_layers(unet)
         text_encoder_one_lora_layers = create_text_encoder_lora_layers(text_encoder)
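
The same two-line change repeats in the four hunks below: the tiny tokenizer repos are not guaranteed to be cached on CI machines, and a local-only load never falls back to downloading. A minimal sketch of the failure mode, assuming a fresh cache (transformers raises an EnvironmentError, i.e. OSError, when nothing is cached):

from transformers import CLIPTokenizer

try:
    # With an empty cache this cannot succeed: local-only loads never download.
    tokenizer = CLIPTokenizer.from_pretrained(
        "hf-internal-testing/tiny-random-clip", local_files_only=True
    )
except EnvironmentError:
    print("not cached; a plain from_pretrained() would download it instead")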

@@ -100,10 +100,10 @@ class StableDiffusionXLPipelineFastTests(PipelineLatentTesterMixin, PipelineTest
             projection_dim=32,
         )
         text_encoder = CLIPTextModel(text_encoder_config)
-        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
+        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
         text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
-        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
+        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
 
         components = {
             "unet": unet,

@@ -100,10 +100,10 @@ class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, Pipel
             projection_dim=32,
         )
         text_encoder = CLIPTextModel(text_encoder_config)
-        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
+        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
         text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
-        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
+        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
 
         components = {
             "unet": unet,

@@ -102,10 +102,10 @@ class StableDiffusionXLInpaintPipelineFastTests(PipelineLatentTesterMixin, Pipel
             projection_dim=32,
         )
         text_encoder = CLIPTextModel(text_encoder_config)
-        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
+        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
         text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
-        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
+        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
 
         components = {
             "unet": unet,

@@ -105,10 +105,10 @@ class StableDiffusionXLInstructPix2PixPipelineFastTests(
             projection_dim=32,
         )
         text_encoder = CLIPTextModel(text_encoder_config)
-        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
+        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
         text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
-        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
+        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
 
         components = {
             "unet": unet,

@@ -374,7 +374,7 @@ class DownloadTests(unittest.TestCase):
         response_mock.json.return_value = {}
 
         # Download this model to make sure it's in the cache.
-        orig_pipe = StableDiffusionPipeline.from_pretrained(
+        orig_pipe = DiffusionPipeline.from_pretrained(
             "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
         )
         orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")}
@@ -382,7 +382,7 @@ class DownloadTests(unittest.TestCase):
         # Under the mock environment we get a 500 error when trying to reach the model.
         with mock.patch("requests.request", return_value=response_mock):
             # Download this model to make sure it's in the cache.
-            pipe = StableDiffusionPipeline.from_pretrained(
+            pipe = DiffusionPipeline.from_pretrained(
                 "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
             )
 
         comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")}
@@ -392,6 +392,42 @@ class DownloadTests(unittest.TestCase):
                 if p1.data.ne(p2.data).sum() > 0:
                     assert False, "Parameters not the same!"
 
+    def test_local_files_only_are_used_when_no_internet(self):
+        # A mock response for an HTTP head request to emulate server down
+        response_mock = mock.Mock()
+        response_mock.status_code = 500
+        response_mock.headers = {}
+        response_mock.raise_for_status.side_effect = HTTPError
+        response_mock.json.return_value = {}
+
+        # first check that with local files only the pipeline can only be used if cached
+        with self.assertRaises(FileNotFoundError):
+            with tempfile.TemporaryDirectory() as tmpdirname:
+                orig_pipe = DiffusionPipeline.from_pretrained(
+                    "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True, cache_dir=tmpdirname
+                )
+
+        # now download
+        orig_pipe = DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-torch")
+
+        # make sure it can be loaded with local_files_only
+        orig_pipe = DiffusionPipeline.from_pretrained(
+            "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True
+        )
+        orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")}
+
+        # Under the mock environment we get a 500 error when trying to connect to the internet.
+        # Make sure local_files_only works here!
+        with mock.patch("requests.request", return_value=response_mock):
+            # The model is already cached, so this load must succeed offline.
+            pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
+            comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")}
+
+        for m1, m2 in zip(orig_comps.values(), comps.values()):
+            for p1, p2 in zip(m1.parameters(), m2.parameters()):
+                if p1.data.ne(p2.data).sum() > 0:
+                    assert False, "Parameters not the same!"
+
     def test_download_from_variant_folder(self):
         for safe_avail in [False, True]:
             import diffusers
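
A side note on the comparison used twice above: p1.data.ne(p2.data).sum() > 0 counts mismatching elements. The following sketch states the same assertion with torch.equal, only to make explicit what the loops verify:

import torch

def assert_same_params(model_a, model_b):
    # True only when every parameter tensor matches in shape and values.
    for p1, p2 in zip(model_a.parameters(), model_b.parameters()):
        assert torch.equal(p1.data, p2.data), "Parameters not the same!"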

@@ -387,7 +387,7 @@ class PipelineTesterMixin:
                 batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]
 
                 # make last batch super long
-                batched_inputs[name][-1] = 2000 * "very long"
+                batched_inputs[name][-1] = 100 * "very long"
             # or else we have images
             else:
                 batched_inputs[name] = batch_size * [value]
@@ -462,7 +462,7 @@ class PipelineTesterMixin:
                 batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]
 
                 # make last batch super long
-                batched_inputs[name][-1] = 2000 * "very long"
+                batched_inputs[name][-1] = 100 * "very long"
             # or else we have images
             else:
                 batched_inputs[name] = batch_size * [value]
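
A plausible reading of the 2000 → 100 reduction, offered as an assumption since the commit message does not say: the last prompt only has to exceed the tokenizer's limit, and CLIP-style tokenizers truncate to model_max_length anyway, so 100 repetitions exercise the same code path at a fraction of the cost:

from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
ids = tok(100 * "very long", truncation=True, return_tensors="pt").input_ids
print(ids.shape)  # capped at tok.model_max_length, e.g. (1, 77)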