mirror of
https://github.com/huggingface/diffusers.git
synced 2026-01-27 17:22:53 +03:00
Fixed multi-token textual inversion training (#4452)
* Added placeholder token concatenation during training
* Update examples/textual_inversion/textual_inversion.py

Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>

---------

Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
This commit is contained in:
@@ -708,7 +708,7 @@ def main():
         data_root=args.train_data_dir,
         tokenizer=tokenizer,
         size=args.resolution,
-        placeholder_token=args.placeholder_token,
+        placeholder_token=(" ".join(tokenizer.convert_ids_to_tokens(placeholder_token_ids))),
         repeats=args.repeats,
         learnable_property=args.learnable_property,
         center_crop=args.center_crop,
Reference in New Issue
Block a user