Mirror of https://github.com/huggingface/diffusers.git (synced 2026-01-27 17:22:53 +03:00)
Add option to use fixed latents for UniDiffuserPipelineSlowTests and fix issue in modeling_text_decoder.py.
@@ -234,7 +234,7 @@ class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
         tokens = None
         scores = None
         seq_lengths = torch.ones(beam_size, device=device)
-        is_stopped = torch.zeros(beam_size, device=device, dtype=bool)
+        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
 
         if embed is not None:
             generated = embed
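For context (not part of the commit): the hunk above replaces the Python builtin `bool` with the explicit `torch.bool` when creating the beam-search stop mask, so the tensor dtype no longer relies on PyTorch's mapping of builtin types. A minimal standalone sketch of what that mask does in the beam-search bookkeeping, with illustrative values:

import torch

beam_size = 4
device = torch.device("cpu")

# The two bookkeeping tensors from the changed lines: per-beam sequence
# lengths and a boolean mask of beams that have already emitted a stop token.
seq_lengths = torch.ones(beam_size, device=device)
is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

# Illustrative step: suppose beams 1 and 3 hit the stop token.
newly_stopped = torch.tensor([False, True, False, True], device=device)
is_stopped = is_stopped | newly_stopped

# Only beams that are still running keep growing.
seq_lengths = seq_lengths + (~is_stopped).float()
print(is_stopped.tolist())   # [False, True, False, True]
print(seq_lengths.tolist())  # [2.0, 1.0, 2.0, 1.0]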
@@ -522,7 +522,7 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):
         gc.collect()
         torch.cuda.empty_cache()
 
-    def get_inputs(self, seed=0):
+    def get_inputs(self, device, seed=0, generate_latents=False):
         generator = torch.manual_seed(seed)
         image = load_image(
             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
@@ -535,7 +535,33 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):
             "guidance_scale": 8.0,
             "output_type": "numpy",
         }
+        if generate_latents:
+            latents = self.get_fixed_latents(device, seed=seed)
+            for latent_name, latent_tensor in latents.items():
+                inputs[latent_name] = latent_tensor
         return inputs
 
+    def get_fixed_latents(self, device, seed=0):
+        if type(device) == str:
+            device = torch.device(device)
+        latent_device = torch.device("cpu")
+        generator = torch.Generator(device=latent_device).manual_seed(seed)
+        # Hardcode the shapes for now.
+        prompt_latents = randn_tensor((1, 77, 32), generator=generator, device=device, dtype=torch.float32)
+        vae_latents = randn_tensor((1, 4, 16, 16), generator=generator, device=device, dtype=torch.float32)
+        clip_latents = randn_tensor((1, 1, 32), generator=generator, device=device, dtype=torch.float32)
+
+        # Move latents onto desired device.
+        prompt_latents = prompt_latents.to(device)
+        vae_latents = vae_latents.to(device)
+        clip_latents = clip_latents.to(device)
+
+        latents = {
+            "prompt_latents": prompt_latents,
+            "vae_latents": vae_latents,
+            "clip_latents": clip_latents,
+        }
+        return latents
+
     def test_unidiffuser_default_joint_v1(self):
         pipe = UniDiffuserPipeline.from_pretrained("dg845/unidiffuser-diffusers")
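For context (not part of the commit): a sketch of how the new generate_latents flag could be exercised from inside UniDiffuserPipelineSlowTests, to be read as a method of that class. The method name and the final assertion are hypothetical; deleting "prompt" and "image" to run joint (image + text) sampling follows the pattern of the existing slow tests, and a CUDA runner plus the dg845/unidiffuser-diffusers checkpoint from the diff are assumed.

    def test_unidiffuser_joint_with_fixed_latents_sketch(self):
        # Hypothetical companion test, not part of this commit.
        pipe = UniDiffuserPipeline.from_pretrained("dg845/unidiffuser-diffusers")
        pipe.to("cuda")
        pipe.set_progress_bar_config(disable=None)

        # generate_latents=True injects the seeded prompt/vae/clip latents from
        # get_fixed_latents(), so repeated runs start from identical noise.
        inputs = self.get_inputs("cuda", seed=0, generate_latents=True)

        # Drop the prompt and image so the pipeline samples in joint
        # (image + text) mode.
        del inputs["prompt"]
        del inputs["image"]

        sample = pipe(**inputs)
        image = sample.images[0]
        text = sample.text[0]

        # With fixed latents the outputs are deterministic on a given
        # hardware/software stack, so a slice of the image (or the decoded
        # text) could be compared against a stored reference value.
        assert image is not None and isinstance(text, str)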