mirror of https://github.com/huggingface/diffusers.git (synced 2026-01-29 07:22:12 +03:00)
* working state from hameerabbasi and iddl
* working state from hameerabbasi and iddl (transformer)
* working state (normalization)
* working state (embeddings)
* add chroma loader
* add chroma to mappings
* add chroma to transformer init
* take out variant stuff
* get decently far in changing variant stuff
* add chroma init
* make chroma output class
* add chroma transformer to dummy tp
* add chroma to init
* add chroma to init
* fix single file
* update
* update
* add chroma to auto pipeline
* add chroma to pipeline init
* change to chroma transformer
* take out variant from blocks
* swap embedder location
* remove prompt_2
* work on swapping text encoders
* remove mask function
* don't modify mask (for now)
* wrap attn mask
* no attn mask (can't get it to work)
* remove pooled prompt embeds
* change to my own unpooled embedder
* fix load
* take pooled projections out of transformer
* ensure correct dtype for chroma embeddings
* update
* use dn6 attn mask + fix true_cfg_scale
* use chroma pipeline output
* use DN6 embeddings
* remove guidance
* remove guidance embed (pipeline)
* remove guidance from embeddings
* don't return length
* don't change dtype
* remove unused stuff, fix up docs
* add chroma autodoc
* add .md (oops)
* initial chroma docs
* undo don't change dtype
* undo arxiv change
unsure why that happened
* fix hf papers regression in more places
* Update docs/source/en/api/pipelines/chroma.md
Co-authored-by: Dhruv Nair <dhruv.nair@gmail.com>
* do_cfg -> self.do_classifier_free_guidance
* Update docs/source/en/api/models/chroma_transformer.md
Co-authored-by: Dhruv Nair <dhruv.nair@gmail.com>
* Update chroma.md
* Move chroma layers into transformer
* Remove pruned AdaLayerNorms
* Add chroma fast tests
* (untested) batch cond and uncond
* Add # Copied from for shift
* Update # Copied from statements
* update norm imports
* Revert cond + uncond batching
* Add transformer tests
* move chroma test (oops)
* chroma init
* fix chroma pipeline fast tests
* Update src/diffusers/models/transformers/transformer_chroma.py
Co-authored-by: Dhruv Nair <dhruv.nair@gmail.com>
* Move Approximator and Embeddings
* Fix auto pipeline + make style, quality
* make style
* Apply style fixes
* switch to new input ids
* fix # Copied from error
* remove # Copied from on protected members
* try to fix import
* fix import
* make fix-copies
* revert style fix
* update chroma transformer params
* update chroma transformer approximator init params
* update to pad tokens
* fix batch inference
* Make more pipeline tests work
* Make most transformer tests work
* fix docs
* make style, make quality
* skip batch tests
* fix test skipping
* fix test skipping again
* fix for tests
* Fix all pipeline tests
* update
* push local changes, fix docs
* add encoder test, remove pooled dim
* default proj dim
* fix tests
* fix equal size list input
* Revert "fix equal size list input"
This reverts commit 3fe4ad67d5.
* update
* update
* update
* update
* update
---------
Co-authored-by: Dhruv Nair <dhruv.nair@gmail.com>
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
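
Taken together, these commits add the Chroma pipeline and its transformer to diffusers: the pooled projections and the distilled guidance embedding are removed, the prompt is embedded unpooled, and guidance runs as true classifier-free guidance with a real negative prompt. A minimal usage sketch of the resulting API follows; the checkpoint ID and the specific call arguments are illustrative assumptions, not taken from this page:

import torch
from diffusers import ChromaPipeline

# Hypothetical checkpoint ID; substitute an actual Chroma checkpoint.
pipe = ChromaPipeline.from_pretrained("lodestones/Chroma1-HD", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Chroma drops the distilled guidance embedding (see the "remove guidance"
# commits above), so the negative prompt meaningfully steers generation.
image = pipe(
    prompt="a photo of a cat wearing a top hat",
    negative_prompt="low quality, blurry",
    num_inference_steps=28,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("chroma_cat.png")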
186 lines · 6.6 KiB · Python
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import FluxTransformer2DModel
from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
from diffusers.models.embeddings import ImageProjection
from diffusers.utils.testing_utils import enable_full_determinism, torch_device

from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin


enable_full_determinism()


def create_flux_ip_adapter_state_dict(model):
    # "ip_adapter" (cross-attention weights)
    ip_cross_attn_state_dict = {}
    key_id = 0

    for name in model.attn_processors.keys():
        if name.startswith("single_transformer_blocks"):
            continue

        joint_attention_dim = model.config["joint_attention_dim"]
        hidden_size = model.config["num_attention_heads"] * model.config["attention_head_dim"]
        sd = FluxIPAdapterJointAttnProcessor2_0(
            hidden_size=hidden_size, cross_attention_dim=joint_attention_dim, scale=1.0
        ).state_dict()
        ip_cross_attn_state_dict.update(
            {
                f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"],
                f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"],
                f"{key_id}.to_k_ip.bias": sd["to_k_ip.0.bias"],
                f"{key_id}.to_v_ip.bias": sd["to_v_ip.0.bias"],
            }
        )

        key_id += 1

    # "image_proj" (ImageProjection layer weights)
    image_projection = ImageProjection(
        cross_attention_dim=model.config["joint_attention_dim"],
        image_embed_dim=(
            model.config["pooled_projection_dim"] if "pooled_projection_dim" in model.config.keys() else 768
        ),
        num_image_text_embeds=4,
    )

    ip_image_projection_state_dict = {}
    sd = image_projection.state_dict()
    ip_image_projection_state_dict.update(
        {
            "proj.weight": sd["image_embeds.weight"],
            "proj.bias": sd["image_embeds.bias"],
            "norm.weight": sd["norm.weight"],
            "norm.bias": sd["norm.bias"],
        }
    )

    del sd
    ip_state_dict = {}
    ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict})
    return ip_state_dict
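
# The dict built above mirrors the serialized layout of an IP-Adapter checkpoint:
# {"image_proj": <ImageProjection weights>, "ip_adapter": <per-block cross-attention weights>}.
# A hypothetical usage sketch (assuming FluxTransformer2DModel exposes the loader
# mixin's `_load_ip_adapter_weights`, as the analogous UNet IP-Adapter tests do;
# this file itself never calls it):
#
#     model = FluxTransformer2DModel(**init_dict).to(torch_device)
#     model._load_ip_adapter_weights([create_flux_ip_adapter_state_dict(model)])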


class FluxTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = FluxTransformer2DModel
    main_input_name = "hidden_states"
    # We override the items here because the transformer under consideration is small.
    model_split_percents = [0.7, 0.6, 0.6]

    # Skip the tests that set the default AttnProcessor; this model uses a custom one.
    uses_custom_attn_processor = True

    @property
    def dummy_input(self):
        batch_size = 1
        num_latent_channels = 4
        num_image_channels = 3
        height = width = 4
        sequence_length = 48
        embedding_dim = 32

        hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
        pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(torch_device)
        text_ids = torch.randn((sequence_length, num_image_channels)).to(torch_device)
        image_ids = torch.randn((height * width, num_image_channels)).to(torch_device)
        timestep = torch.tensor([1.0]).to(torch_device).expand(batch_size)

        return {
            "hidden_states": hidden_states,
            "encoder_hidden_states": encoder_hidden_states,
            "img_ids": image_ids,
            "txt_ids": text_ids,
            "pooled_projections": pooled_prompt_embeds,
            "timestep": timestep,
        }
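
    # With height = width = 4 and num_latent_channels = 4, each sample in
    # `hidden_states` is a (16, 4) tensor; that per-sample shape is what the
    # `input_shape` / `output_shape` properties below describe.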

    @property
    def input_shape(self):
        return (16, 4)

    @property
    def output_shape(self):
        return (16, 4)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "patch_size": 1,
            "in_channels": 4,
            "num_layers": 1,
            "num_single_layers": 1,
            "attention_head_dim": 16,
            "num_attention_heads": 2,
            "joint_attention_dim": 32,
            "pooled_projection_dim": 32,
            "axes_dims_rope": [4, 4, 8],
        }

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_deprecated_inputs_img_txt_ids_3d(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output_1 = model(**inputs_dict).to_tuple()[0]

        # update inputs_dict with txt_ids and img_ids as 3d tensors (deprecated)
        text_ids_3d = inputs_dict["txt_ids"].unsqueeze(0)
        image_ids_3d = inputs_dict["img_ids"].unsqueeze(0)

        assert text_ids_3d.ndim == 3, "text_ids_3d should be a 3d tensor"
        assert image_ids_3d.ndim == 3, "img_ids_3d should be a 3d tensor"

        inputs_dict["txt_ids"] = text_ids_3d
        inputs_dict["img_ids"] = image_ids_3d

        with torch.no_grad():
            output_2 = model(**inputs_dict).to_tuple()[0]

        self.assertEqual(output_1.shape, output_2.shape)
        self.assertTrue(
            torch.allclose(output_1, output_2, atol=1e-5),
            msg="outputs with deprecated 3d img_ids/txt_ids should match the outputs with 2d inputs",
        )

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"FluxTransformer2DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)


class FluxTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
    model_class = FluxTransformer2DModel

    def prepare_init_args_and_inputs_for_common(self):
        return FluxTransformerTests().prepare_init_args_and_inputs_for_common()


class FluxTransformerLoRAHotSwapTests(LoraHotSwappingForModelTesterMixin, unittest.TestCase):
    model_class = FluxTransformer2DModel

    def prepare_init_args_and_inputs_for_common(self):
        return FluxTransformerTests().prepare_init_args_and_inputs_for_common()
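
# Running this module locally (the path is assumed from the relative import of
# test_modeling_common; in the diffusers repo the file lives under
# tests/models/transformers/):
#
#     pytest tests/models/transformers/test_models_transformer_flux.py -q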