
[Tests] Lower required memory for clip guided and fix super edge-case git pipeline module bug (#754)

* [Tests] Lower required memory

* fix

* up

* uP
Patrick von Platen
2022-10-06 19:15:26 +02:00
committed by GitHub
parent 2fa55fc7d4
commit ae672d58ef
2 changed files with 8 additions and 3 deletions


@@ -259,7 +259,8 @@ def get_cached_module_file(
                 local_files_only=local_files_only,
                 use_auth_token=False,
             )
-            submodule = "local"
+            submodule = "git"
+            module_file = pretrained_model_name_or_path + ".py"
         except EnvironmentError:
             logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
             raise
@@ -288,7 +289,7 @@ def get_cached_module_file(
     full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
     create_dynamic_module(full_submodule)
     submodule_path = Path(HF_MODULES_CACHE) / full_submodule
-    if submodule == "local":
+    if submodule == "local" or submodule == "git":
         # We always copy local files (we could hash the file to see if there was a change, and give them the name of
         # that hash, to only copy when there is a modification but it seems overkill for now).
         # The only reason we do the copy is to avoid putting too many folders in sys.path.
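
In plain terms: modules resolved from the Hub are now tagged "git" instead of "local", the cached file is renamed after the repo id, and the copy branch above covers the new tag as well, so Hub-sourced pipeline modules land in the dynamic-module cache under their own name. Below is a rough standalone sketch of that copy step; the cache_remote_module helper and the modules_cache argument are invented for illustration and are not the library's actual code.

import os
import shutil
from pathlib import Path

def cache_remote_module(resolved_module_file: str, pretrained_model_name_or_path: str, modules_cache: str) -> Path:
    # Hypothetical helper illustrating the patched flow, not diffusers' implementation.
    submodule = "git"  # previously "local", which did not distinguish Hub-resolved modules from local files
    module_file = pretrained_model_name_or_path + ".py"  # name the cached module after the repo id

    submodule_path = Path(modules_cache) / submodule
    os.makedirs(submodule_path, exist_ok=True)

    if submodule in ("local", "git"):
        # Copy the resolved file so only one folder ever needs to be added to sys.path.
        shutil.copy(resolved_module_file, submodule_path / os.path.basename(module_file))

    return submodule_path / os.path.basename(module_file)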


@@ -112,18 +112,22 @@ class CustomPipelineTests(unittest.TestCase):
         assert output_str == "This is a local test"
     @slow
     @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
     def test_load_pipeline_from_git(self):
         clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
         feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id)
-        clip_model = CLIPModel.from_pretrained(clip_model_id)
+        clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)
         pipeline = DiffusionPipeline.from_pretrained(
             "CompVis/stable-diffusion-v1-4",
             custom_pipeline="clip_guided_stable_diffusion",
             clip_model=clip_model,
             feature_extractor=feature_extractor,
+            torch_dtype=torch.float16,
+            revision="fp16",
         )
+        pipeline.enable_attention_slicing()
         pipeline = pipeline.to(torch_device)
         # NOTE that `"CLIPGuidedStableDiffusion"` is not a class that is defined in the pypi package of the library, but solely on the community examples folder of GitHub under:
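
For context, loading the community pipeline the updated test exercises looks roughly like this. This is a usage sketch mirroring the test above, not part of the commit; it assumes a CUDA device with enough memory and network access to fetch the community pipeline.

import torch
from transformers import CLIPFeatureExtractor, CLIPModel
from diffusers import DiffusionPipeline

clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id)
clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)

pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_stable_diffusion",  # resolved from the community examples, the code path fixed above
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
    revision="fp16",
)
pipeline.enable_attention_slicing()  # lowers peak memory, as in the test
pipeline = pipeline.to("cuda")
# Generation would follow here; the clip-guided pipeline's call signature is defined in the community example file.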