
[I2VGenXL] attention_head_dim in the UNet (#6872)

* attention_head_dim

* debug

* print more info

* correct num_attention_heads behaviour

* down_block_num_attention_heads -> num_attention_heads.

* correct the image link in doc.

* add: deprecation for num_attention_heads

* fix: test argument to use attention_head_dim

* more fixes.

* quality

* address comments.

* remove deprecation.
Sayak Paul
2024-02-08 12:30:14 +05:30
committed by GitHub
parent aa82df52e7
commit 491a933a1b
4 changed files with 15 additions and 3 deletions


@@ -158,6 +158,7 @@ class BasicTransformerBlock(nn.Module):
         super().__init__()
         self.only_cross_attention = only_cross_attention
 
+        # We keep these boolean flags for backward-compatibility.
         self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
         self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
         self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
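
For quick reference, the three backward-compatibility flags above are plain functions of `norm_type` and `num_embeds_ada_norm`. A minimal standalone sketch of that derivation (not the library's code, just the same boolean logic extracted for illustration):

```python
# Sketch only: mirrors the flag logic shown in the hunk above.
def derive_norm_flags(norm_type: str, num_embeds_ada_norm=None):
    use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
    use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
    use_ada_layer_norm_single = norm_type == "ada_norm_single"
    return use_ada_layer_norm_zero, use_ada_layer_norm, use_ada_layer_norm_single


print(derive_norm_flags("ada_norm_zero", num_embeds_ada_norm=8))  # (True, False, False)
print(derive_norm_flags("layer_norm"))                            # (False, False, False)
```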


@@ -120,6 +120,7 @@ class I2VGenXLUNet(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
         norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
             If `None`, normalization and activation layers are skipped in post-processing.
         cross_attention_dim (`int`, *optional*, defaults to 1024): The dimension of the cross attention features.
+        attention_head_dim (`int`, *optional*, defaults to 64): The dimension of each attention head.
         num_attention_heads (`int`, *optional*): The number of attention heads.
     """
@@ -147,10 +148,19 @@ class I2VGenXLUNet(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
         layers_per_block: int = 2,
         norm_num_groups: Optional[int] = 32,
         cross_attention_dim: int = 1024,
-        num_attention_heads: Optional[Union[int, Tuple[int]]] = 64,
+        attention_head_dim: Union[int, Tuple[int]] = 64,
+        num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
     ):
         super().__init__()
 
+        # When we first integrated the UNet into the library, we didn't have `attention_head_dim`. As a consequence
+        # of that, we used `num_attention_heads` for arguments that actually denote attention head dimension. This
+        # is why we ignore `num_attention_heads` and calculate it from `attention_head_dim` below.
+        # This is still an incorrect way of calculating `num_attention_heads` but we need to stick to it
+        # without running proper deprecation cycles for the {down,mid,up} blocks which are a
+        # part of the public API.
+        num_attention_heads = attention_head_dim
+
         # Check inputs
         if len(down_block_types) != len(up_block_types):
             raise ValueError(
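
The added comment block boils down to a single remapping: whatever is passed as `num_attention_heads` is ignored, and `attention_head_dim` becomes the effective head count for the {down,mid,up} blocks. A minimal standalone sketch of that behaviour (the helper name is hypothetical, not part of the library):

```python
# Sketch only: reproduces the remapping performed at the top of I2VGenXLUNet.__init__ above.
def resolve_num_attention_heads(attention_head_dim=64, num_attention_heads=None):
    # `num_attention_heads` is accepted only to keep the public signature stable; it is ignored.
    return attention_head_dim


print(resolve_num_attention_heads(attention_head_dim=4))                         # 4
print(resolve_num_attention_heads(attention_head_dim=4, num_attention_heads=8))  # 4 (the 8 is ignored)
```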


@@ -46,7 +46,7 @@ EXAMPLE_DOC_STRING = """
         >>> pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
         >>> pipeline.enable_model_cpu_offload()
 
-        >>> image_url = "https://github.com/ali-vilab/i2vgen-xl/blob/main/data/test_images/img_0009.png?raw=true"
+        >>> image_url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/i2vgen_xl_images/img_0009.png"
         >>> image = load_image(image_url).convert("RGB")
         >>> prompt = "Papers were floating in the air on a table in the library"
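
The docstring example is truncated by the diff at this point. A hedged sketch of how the generation step could continue from the names defined above (none of this is part of the change; the argument values are illustrative, and `torch` plus `export_to_gif` from `diffusers.utils` are assumed to be imported earlier in the example):

```python
# Illustrative continuation only, not taken from the diff.
generator = torch.manual_seed(0)
frames = pipeline(
    prompt=prompt,
    image=image,
    num_inference_steps=50,
    generator=generator,
).frames[0]
export_to_gif(frames, "i2v.gif")
```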


@@ -80,7 +80,8 @@ class I2VGenXLPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
             down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"),
             up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"),
             cross_attention_dim=4,
-            num_attention_heads=4,
+            attention_head_dim=4,
+            num_attention_heads=None,
             norm_num_groups=2,
         )
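
To see the new constructor contract end to end, here is a minimal sketch that reuses the arguments visible in the test hunk above; the remaining sizes (`in_channels`, `out_channels`, `block_out_channels`, `layers_per_block`) are small illustrative assumptions, not the test's actual values:

```python
# Sketch only: instantiate the UNet with the post-change arguments and inspect its config.
from diffusers import I2VGenXLUNet

unet = I2VGenXLUNet(
    in_channels=4,                # assumed
    out_channels=4,               # assumed
    block_out_channels=(8, 16),   # assumed
    layers_per_block=1,           # assumed
    down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"),
    up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"),
    cross_attention_dim=4,
    attention_head_dim=4,
    num_attention_heads=None,
    norm_num_groups=2,
)

print(unet.config.attention_head_dim)   # 4
print(unet.config.num_attention_heads)  # None, ignored by the constructor
```

Loading existing checkpoints should be unaffected: a config that still stores the old `num_attention_heads` value constructs as before, since that value is ignored in favour of `attention_head_dim`, whose default (64) matches the previous `num_attention_heads` default shown in the hunk above.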