From 14f6464bef677e47e1ff13a12f4ddd97e7f3e973 Mon Sep 17 00:00:00 2001
From: M Saqlain <118016760+saqlain2204@users.noreply.github.com>
Date: Mon, 23 Sep 2024 20:35:50 +0530
Subject: [PATCH] [Tests] Reduce the model size in the lumina test (#8985)

* Reduced model size for lumina-tests
* Handled failing tests
---
 tests/pipelines/lumina/test_lumina_nextdit.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/pipelines/lumina/test_lumina_nextdit.py b/tests/pipelines/lumina/test_lumina_nextdit.py
index d6aeb57b80..5fd0dbf060 100644
--- a/tests/pipelines/lumina/test_lumina_nextdit.py
+++ b/tests/pipelines/lumina/test_lumina_nextdit.py
@@ -34,19 +34,19 @@ class LuminaText2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterM
     def get_dummy_components(self):
         torch.manual_seed(0)
         transformer = LuminaNextDiT2DModel(
-            sample_size=16,
+            sample_size=4,
             patch_size=2,
             in_channels=4,
-            hidden_size=24,
+            hidden_size=4,
             num_layers=2,
-            num_attention_heads=3,
+            num_attention_heads=1,
             num_kv_heads=1,
             multiple_of=16,
             ffn_dim_multiplier=None,
             norm_eps=1e-5,
             learn_sigma=True,
             qk_norm=True,
-            cross_attention_dim=32,
+            cross_attention_dim=8,
             scaling_factor=1.0,
         )
         torch.manual_seed(0)
@@ -57,8 +57,8 @@ class LuminaText2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterM
         torch.manual_seed(0)
         config = GemmaConfig(
-            head_dim=4,
-            hidden_size=32,
+            head_dim=2,
+            hidden_size=8,
             intermediate_size=37,
             num_attention_heads=4,
             num_hidden_layers=2,