Moving certain pipelines slow tests to nightly (#4469)

* move audioldm tests to nightly

* move kandinsky im2img ddpm test to nightly

* move flax dpm test to nightly

* move diffedit dpm test to nightly

* move fp16 slow tests to nightly
Author: Dhruv Nair
Date: 2023-08-07 17:28:56 +02:00 (committed by GitHub)
Parent: 4367b8a300
Commit: 71c8224159
5 changed files with 114 additions and 5 deletions

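The test classes added below are all gated with a new @nightly marker imported from diffusers.utils. As a minimal sketch of how such a marker typically works (the RUN_NIGHTLY flag name and the skip reason here are illustrative assumptions, not read from this diff):

import os
import unittest

# Assumed mechanism: an opt-in environment flag lifts the skip, so the
# nightly CI job sets RUN_NIGHTLY=1 while per-PR CI leaves it unset.
RUN_NIGHTLY = os.environ.get("RUN_NIGHTLY", "0") == "1"
nightly = unittest.skipUnless(RUN_NIGHTLY, "test is run nightly")

@nightly
class ExampleNightlyTests(unittest.TestCase):
    def test_smoke(self):
        self.assertTrue(True)

The real marker in diffusers.utils is implemented as a pytest mark rather than a unittest decorator, but the effect is comparable: the heavy pipeline tests below only execute in the nightly job.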

@@ -36,7 +36,7 @@ from diffusers import (
     PNDMScheduler,
     UNet2DConditionModel,
 )
-from diffusers.utils import is_xformers_available, slow, torch_device
+from diffusers.utils import is_xformers_available, nightly, slow, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism
 
 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
@@ -408,6 +408,27 @@ class AudioLDMPipelineSlowTests(unittest.TestCase):
         max_diff = np.abs(expected_slice - audio_slice).max()
         assert max_diff < 1e-2
+
+
+@nightly
+class AudioLDMPipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        super().tearDown()
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
+        generator = torch.Generator(device=generator_device).manual_seed(seed)
+        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
+        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
+        inputs = {
+            "prompt": "A hammer hitting a wooden surface",
+            "latents": latents,
+            "generator": generator,
+            "num_inference_steps": 3,
+            "guidance_scale": 2.5,
+        }
+        return inputs
+
+    def test_audioldm_lms(self):
+        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
+        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)

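The get_inputs helper above pins the initial latents instead of letting the pipeline sample them, so the numeric assertions hold across machines. A standalone sketch of that pattern (the latent shape is taken from the diff; the helper name is illustrative):

import numpy as np
import torch

def fixed_latents(shape, seed=0, device="cpu", dtype=torch.float32):
    # NumPy's RandomState yields identical noise on every platform, unlike
    # device-dependent CUDA RNG, so the test tolerances stay meaningful.
    noise = np.random.RandomState(seed).standard_normal(shape)
    return torch.from_numpy(noise).to(device=device, dtype=dtype)

# (1, 8, 128, 16) is the AudioLDM latent shape used by get_inputs above.
latents = fixed_latents((1, 8, 128, 16))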

@@ -31,7 +31,7 @@ from diffusers import (
     VQModel,
 )
 from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
-from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
+from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
 
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
@@ -344,6 +344,16 @@ class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
         assert_mean_pixel_difference(image, expected_image)
+
+
+@nightly
+@require_torch_gpu
+class KandinskyImg2ImgPipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        # clean up the VRAM after each test
+        super().tearDown()
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    def test_kandinsky_img2img_ddpm(self):
+        expected_image = load_numpy(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"

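The Kandinsky nightly test loads a stored reference output with load_numpy and compares it to the generated image via assert_mean_pixel_difference. A rough sketch of what such a helper checks (this re-implementation and its threshold of 10 uint8 levels are assumptions, not taken from this diff):

import numpy as np

def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10):
    # Hypothetical re-implementation: the average absolute per-pixel
    # difference must stay below the threshold, tolerating small
    # hardware/driver nondeterminism while catching real regressions.
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    avg_diff = np.abs(image - expected_image).mean()
    assert avg_diff < expected_max_diff, f"images differ by {avg_diff} on average"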

@@ -32,7 +32,7 @@ from diffusers import (
     StableDiffusionDiffEditPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import load_image, slow
+from diffusers.utils import load_image, nightly, slow
 from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
@@ -347,6 +347,25 @@ class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
         )
         assert np.abs((expected_image - image).max()) < 5e-1
+
+
+@nightly
+@require_torch_gpu
+class StableDiffusionDiffEditPipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        super().tearDown()
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    @classmethod
+    def setUpClass(cls):
+        raw_image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
+        )
+        raw_image = raw_image.convert("RGB").resize((768, 768))
+        cls.raw_image = raw_image
+
+    def test_stable_diffusion_diffedit_dpm(self):
+        generator = torch.manual_seed(0)

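Note the setUpClass hook in the DiffEdit class above: the fruit image is downloaded and resized once per class rather than once per test. A minimal sketch of that fixture pattern (the helper below is a stand-in, not from the diff):

import unittest

def load_fixture_image():
    # Stand-in for the load_image(...).convert("RGB").resize((768, 768))
    # call above; returns a placeholder so the sketch runs offline.
    return "raw-image-placeholder"

class SharedFixtureTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Runs once for the whole class; every test method reuses
        # cls.raw_image instead of re-downloading it.
        cls.raw_image = load_fixture_image()

    def test_uses_shared_image(self):
        self.assertIsNotNone(self.raw_image)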

@@ -17,7 +17,7 @@ import gc
 import unittest
 
 from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
-from diffusers.utils import is_flax_available, slow
+from diffusers.utils import is_flax_available, nightly, slow
 from diffusers.utils.testing_utils import require_flax
@@ -65,6 +65,15 @@ class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
         print(f"output_slice: {output_slice}")
         assert jnp.abs(output_slice - expected_slice).max() < 1e-2
+
+
+@nightly
+@require_flax
+class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        # clean up the VRAM after each test
+        super().tearDown()
+        gc.collect()
+
+    def test_stable_diffusion_dpm_flax(self):
+        model_id = "stabilityai/stable-diffusion-2"
+        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")

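In the Flax test, FlaxDPMSolverMultistepScheduler.from_pretrained returns both the scheduler module and its state params, because Flax modules are stateless. The hunk is truncated, but the usual continuation of this API is to merge the scheduler state into the pipeline's params. A sketch of that standard pattern (the revision and dtype arguments follow common Flax Stable Diffusion examples and are assumptions here, not the literal body of the truncated test):

import jax.numpy as jnp
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline

model_id = "stabilityai/stable-diffusion-2"
# Flax from_pretrained returns (module, params): parameters travel
# separately from the stateless module.
scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(
    model_id, subfolder="scheduler"
)
pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
    model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16
)
# The scheduler's state must be slotted into the pipeline's params dict
# before calling the pipeline.
params["scheduler"] = scheduler_params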

@@ -20,7 +20,7 @@ from diffusers import (
     UniDiffuserPipeline,
     UniDiffuserTextDecoder,
 )
-from diffusers.utils import floats_tensor, load_image, randn_tensor, slow, torch_device
+from diffusers.utils import floats_tensor, load_image, nightly, randn_tensor, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
@@ -619,6 +619,56 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):
         expected_text_prefix = "An astronaut"
         assert text[0][: len(expected_text_prefix)] == expected_text_prefix
+
+
+@nightly
+@require_torch_gpu
+class UniDiffuserPipelineNightlyTests(unittest.TestCase):
+    def tearDown(self):
+        super().tearDown()
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    def get_inputs(self, device, seed=0, generate_latents=False):
+        generator = torch.manual_seed(seed)
+        image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
+        )
+        inputs = {
+            "prompt": "an elephant under the sea",
+            "image": image,
+            "generator": generator,
+            "num_inference_steps": 3,
+            "guidance_scale": 8.0,
+            "output_type": "numpy",
+        }
+        if generate_latents:
+            latents = self.get_fixed_latents(device, seed=seed)
+            for latent_name, latent_tensor in latents.items():
+                inputs[latent_name] = latent_tensor
+        return inputs
+
+    def get_fixed_latents(self, device, seed=0):
+        if type(device) == str:
+            device = torch.device(device)
+        latent_device = torch.device("cpu")
+        generator = torch.Generator(device=latent_device).manual_seed(seed)
+        # Hardcode the shapes for now.
+        prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32)
+        vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32)
+        clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32)
+        # Move latents onto desired device.
+        prompt_latents = prompt_latents.to(device)
+        vae_latents = vae_latents.to(device)
+        clip_latents = clip_latents.to(device)
+        latents = {
+            "prompt_latents": prompt_latents,
+            "vae_latents": vae_latents,
+            "clip_latents": clip_latents,
+        }
+        return latents
+
+    def test_unidiffuser_default_joint_v1_fp16(self):
+        pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
+        pipe.to(torch_device)
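get_fixed_latents above draws every latent from a Generator seeded on the CPU even when the test targets CUDA, because CUDA generators produce different sequences across devices. A minimal sketch of that idea with plain torch.randn (diffusers' randn_tensor wraps roughly this create-then-move step; the helper name is illustrative):

import torch

def reproducible_noise(shape, seed=0, device="cpu", dtype=torch.float32):
    # Sample on a seeded CPU generator for cross-machine determinism,
    # then move the tensor to the target device afterwards.
    generator = torch.Generator(device="cpu").manual_seed(seed)
    return torch.randn(shape, generator=generator, dtype=dtype).to(device)

# Example: the UniDiffuser prompt-latent shape hardcoded above.
prompt_latents = reproducible_noise((1, 77, 768))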