mirror of https://github.com/huggingface/diffusers.git
synced 2026-01-29 07:22:12 +03:00
* initial commit
* initial commit
* initial commit
* initial commit
* initial commit
* initial commit
* Update examples/dreambooth/train_dreambooth_lora_hidream.py
Co-authored-by: Bagheera <59658056+bghira@users.noreply.github.com>
* move prompt embeds, pooled embeds outside
* Update examples/dreambooth/train_dreambooth_lora_hidream.py
Co-authored-by: hlky <hlky@hlky.ac>
* Update examples/dreambooth/train_dreambooth_lora_hidream.py
Co-authored-by: hlky <hlky@hlky.ac>
* fix import
* fix import and tokenizer 4, text encoder 4 loading
* te
* prompt embeds
* fix naming
* shapes
* initial commit to add HiDreamImageLoraLoaderMixin
* fix init
* add tests
* loader
* fix model input
* add code example to readme
* fix default max length of text encoders
* prints
* nullify training cond in unpatchify for temp fix to incompatible shaping of transformer output during training
* smol fix
* unpatchify
* unpatchify
* fix validation
* flip pred and loss
* fix shift!!!
* revert unpatchify changes (for now)
* smol fix
* Apply style fixes
* workaround moe training
* workaround moe training
* remove prints
* to reduce some memory, keep vae in `weight_dtype` same as we have for flux (as it's the same vae)
bbd0c161b5/examples/dreambooth/train_dreambooth_lora_flux.py (L1207)
* refactor to align with HiDream refactor
* refactor to align with HiDream refactor
* refactor to align with HiDream refactor
* add support for cpu offloading of text encoders
* Apply style fixes
* adjust lr and rank for train example
* fix copies
* Apply style fixes
* update README
* update README
* update README
* fix license
* keep prompt2,3,4 as None in validation
* remove reverse ode comment
* Update examples/dreambooth/train_dreambooth_lora_hidream.py
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
* Update examples/dreambooth/train_dreambooth_lora_hidream.py
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
* vae offload change
* fix text encoder offloading
* Apply style fixes
* cleaner to_kwargs
* fix module name in copied from
* add requirements
* fix offloading
* fix offloading
* fix offloading
* update transformers version in reqs
* try AutoTokenizer
* try AutoTokenizer
* Apply style fixes
* empty commit
* Delete tests/lora/test_lora_layers_hidream.py
* change tokenizer_4 to load with AutoTokenizer as well
* make text_encoder_four and tokenizer_four configurable
* save model card
* save model card
* revert T5
* fix test
* remove non diffusers lumina2 conversion
---------
Co-authored-by: Bagheera <59658056+bghira@users.noreply.github.com>
Co-authored-by: hlky <hlky@hlky.ac>
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
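The VAE `weight_dtype` change noted in the log above mirrors the linked train_dreambooth_lora_flux.py line, since HiDream uses the same VAE as Flux. A minimal sketch of the idea, assuming a diffusers `AutoencoderKL` checkpoint (the repo id and dtype below are illustrative, not taken from this commit):

import torch
from diffusers import AutoencoderKL

# Keep the VAE in the training weight dtype instead of float32 to roughly halve
# its memory footprint; HiDream reuses the Flux VAE, so the same trick applies.
weight_dtype = torch.bfloat16  # illustrative; typically derived from the accelerator's mixed precision
vae = AutoencoderKL.from_pretrained(
    "HiDream-ai/HiDream-I1-Full", subfolder="vae", torch_dtype=weight_dtype
)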
221 lines
9.7 KiB
Python
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys
import tempfile

import safetensors


sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command  # noqa: E402


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class DreamBoothLoRAHiDreamImage(ExamplesTestsAccelerate):
    # Tiny test checkpoints are used throughout so the example script runs quickly on CI.
    instance_data_dir = "docs/source/en/imgs"
    pretrained_model_name_or_path = "hf-internal-testing/tiny-hidream-i1-pipe"
    text_encoder_4_path = "hf-internal-testing/tiny-random-LlamaForCausalLM"
    tokenizer_4_path = "hf-internal-testing/tiny-random-LlamaForCausalLM"
    script_path = "examples/dreambooth/train_dreambooth_lora_hidream.py"
    transformer_layer_type = "double_stream_blocks.0.block.attn1.to_k"

    def test_dreambooth_lora_hidream(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            test_args = f"""
                {self.script_path}
                --pretrained_model_name_or_path {self.pretrained_model_name_or_path}
                --pretrained_text_encoder_4_name_or_path {self.text_encoder_4_path}
                --pretrained_tokenizer_4_name_or_path {self.tokenizer_4_path}
                --instance_data_dir {self.instance_data_dir}
                --resolution 32
                --train_batch_size 1
                --gradient_accumulation_steps 1
                --max_train_steps 2
                --learning_rate 5.0e-04
                --scale_lr
                --lr_scheduler constant
                --lr_warmup_steps 0
                --output_dir {tmpdir}
                --max_sequence_length 16
                """.split()

            test_args.extend(["--instance_prompt", ""])
            run_command(self._launch_args + test_args)

            # save_pretrained smoke test
            self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))

            # make sure the state_dict has the correct naming in the parameters.
            lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
            is_lora = all("lora" in k for k in lora_state_dict.keys())
            self.assertTrue(is_lora)

            # when not training the text encoder, all the parameters in the state dict should start
            # with `"transformer"` in their names.
            starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys())
            self.assertTrue(starts_with_transformer)

    def test_dreambooth_lora_latent_caching(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            test_args = f"""
                {self.script_path}
                --pretrained_model_name_or_path {self.pretrained_model_name_or_path}
                --pretrained_text_encoder_4_name_or_path {self.text_encoder_4_path}
                --pretrained_tokenizer_4_name_or_path {self.tokenizer_4_path}
                --instance_data_dir {self.instance_data_dir}
                --resolution 32
                --train_batch_size 1
                --gradient_accumulation_steps 1
                --max_train_steps 2
                --cache_latents
                --learning_rate 5.0e-04
                --scale_lr
                --lr_scheduler constant
                --lr_warmup_steps 0
                --output_dir {tmpdir}
                --max_sequence_length 16
                """.split()

            test_args.extend(["--instance_prompt", ""])
            run_command(self._launch_args + test_args)

            # save_pretrained smoke test
            self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))

            # make sure the state_dict has the correct naming in the parameters.
            lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
            is_lora = all("lora" in k for k in lora_state_dict.keys())
            self.assertTrue(is_lora)

            # when not training the text encoder, all the parameters in the state dict should start
            # with `"transformer"` in their names.
            starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys())
            self.assertTrue(starts_with_transformer)

    def test_dreambooth_lora_layers(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            test_args = f"""
                {self.script_path}
                --pretrained_model_name_or_path {self.pretrained_model_name_or_path}
                --pretrained_text_encoder_4_name_or_path {self.text_encoder_4_path}
                --pretrained_tokenizer_4_name_or_path {self.tokenizer_4_path}
                --instance_data_dir {self.instance_data_dir}
                --resolution 32
                --train_batch_size 1
                --gradient_accumulation_steps 1
                --max_train_steps 2
                --cache_latents
                --learning_rate 5.0e-04
                --scale_lr
                --lora_layers {self.transformer_layer_type}
                --lr_scheduler constant
                --lr_warmup_steps 0
                --output_dir {tmpdir}
                --max_sequence_length 16
                """.split()

            test_args.extend(["--instance_prompt", ""])
            run_command(self._launch_args + test_args)

            # save_pretrained smoke test
            self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))

            # make sure the state_dict has the correct naming in the parameters.
            lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
            is_lora = all("lora" in k for k in lora_state_dict.keys())
            self.assertTrue(is_lora)

            # when not training the text encoder, all the parameters in the state dict should start
            # with `"transformer"` in their names. In this test, only the params of
            # `self.transformer_layer_type` should be in the state dict.
            starts_with_transformer = all(self.transformer_layer_type in key for key in lora_state_dict)
            self.assertTrue(starts_with_transformer)

    def test_dreambooth_lora_hidream_checkpointing_checkpoints_total_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            test_args = f"""
                {self.script_path}
                --pretrained_model_name_or_path={self.pretrained_model_name_or_path}
                --pretrained_text_encoder_4_name_or_path {self.text_encoder_4_path}
                --pretrained_tokenizer_4_name_or_path {self.tokenizer_4_path}
                --instance_data_dir={self.instance_data_dir}
                --output_dir={tmpdir}
                --resolution=32
                --train_batch_size=1
                --gradient_accumulation_steps=1
                --max_train_steps=6
                --checkpoints_total_limit=2
                --checkpointing_steps=2
                --max_sequence_length 16
                """.split()

            test_args.extend(["--instance_prompt", ""])
            run_command(self._launch_args + test_args)

            # with max_train_steps=6, checkpointing_steps=2, and checkpoints_total_limit=2,
            # only the two most recent checkpoints should remain.
            self.assertEqual(
                {x for x in os.listdir(tmpdir) if "checkpoint" in x},
                {"checkpoint-4", "checkpoint-6"},
            )

    def test_dreambooth_lora_hidream_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            test_args = f"""
                {self.script_path}
                --pretrained_model_name_or_path={self.pretrained_model_name_or_path}
                --pretrained_text_encoder_4_name_or_path {self.text_encoder_4_path}
                --pretrained_tokenizer_4_name_or_path {self.tokenizer_4_path}
                --instance_data_dir={self.instance_data_dir}
                --output_dir={tmpdir}
                --resolution=32
                --train_batch_size=1
                --gradient_accumulation_steps=1
                --max_train_steps=4
                --checkpointing_steps=2
                --max_sequence_length 16
                """.split()

            test_args.extend(["--instance_prompt", ""])
            run_command(self._launch_args + test_args)

            self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"})

            resume_run_args = f"""
                {self.script_path}
                --pretrained_model_name_or_path={self.pretrained_model_name_or_path}
                --pretrained_text_encoder_4_name_or_path {self.text_encoder_4_path}
                --pretrained_tokenizer_4_name_or_path {self.tokenizer_4_path}
                --instance_data_dir={self.instance_data_dir}
                --output_dir={tmpdir}
                --resolution=32
                --train_batch_size=1
                --gradient_accumulation_steps=1
                --max_train_steps=8
                --checkpointing_steps=2
                --resume_from_checkpoint=checkpoint-4
                --checkpoints_total_limit=2
                --max_sequence_length 16
                """.split()

            resume_run_args.extend(["--instance_prompt", ""])
            run_command(self._launch_args + resume_run_args)

            # resume from checkpoint-4 and train to step 8; with checkpoints_total_limit=2,
            # the older checkpoint-2 and checkpoint-4 should have been pruned.
            self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
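For context on what these tests exercise end to end: the PR above adds HiDreamImageLoraLoaderMixin, so the pytorch_lora_weights.safetensors file the tests assert on should attach to the pipeline roughly as sketched below. This is a hedged sketch, not code from this PR; the repo ids and output path are illustrative, and the fourth text encoder/tokenizer pair is loaded separately from a Llama checkpoint, matching the tiny Llama stand-ins used in the tests.

import torch
from transformers import AutoTokenizer, LlamaForCausalLM
from diffusers import HiDreamImagePipeline

# Illustrative: the fourth text encoder/tokenizer pair is a Llama checkpoint,
# mirroring --pretrained_text_encoder_4_name_or_path in the training script.
llama_repo = "meta-llama/Meta-Llama-3.1-8B-Instruct"
tokenizer_4 = AutoTokenizer.from_pretrained(llama_repo)
text_encoder_4 = LlamaForCausalLM.from_pretrained(
    llama_repo, output_hidden_states=True, torch_dtype=torch.bfloat16
)

pipe = HiDreamImagePipeline.from_pretrained(
    "HiDream-ai/HiDream-I1-Full",
    tokenizer_4=tokenizer_4,
    text_encoder_4=text_encoder_4,
    torch_dtype=torch.bfloat16,
)
# Attach the LoRA produced by train_dreambooth_lora_hidream.py; the tests above
# verify this file exists and that its keys are LoRA params on the transformer.
pipe.load_lora_weights("output_dir", weight_name="pytorch_lora_weights.safetensors")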