diff --git a/examples/text_to_image/train_text_to_image_lora.py b/examples/text_to_image/train_text_to_image_lora.py
index a3c5bef73a..abc535594d 100644
--- a/examples/text_to_image/train_text_to_image_lora.py
+++ b/examples/text_to_image/train_text_to_image_lora.py
@@ -418,9 +418,9 @@ def main():
     # freeze parameters of models to save more memory
     unet.requires_grad_(False)
     vae.requires_grad_(False)
-
+    text_encoder.requires_grad_(False)
 
-
+
     # For mixed precision training we cast the text_encoder and vae weights to half-precision
     # as these models are only used for inference, keeping weights in full precision is not required.
     weight_dtype = torch.float32
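For context, a minimal sketch (not part of the change) of the freeze-and-cast pattern this hunk completes: all base models are frozen with `requires_grad_(False)`, the inference-only models are cast to half precision, and only the LoRA parameters stay trainable in full precision. The toy modules and `lora_params` below are placeholders, not the script's actual objects; the real script starts from `weight_dtype = torch.float32` and only switches dtype based on the accelerator's mixed-precision setting.

```python
# Illustrative sketch only -- toy modules stand in for the real UNet / VAE / text encoder.
import torch
import torch.nn as nn

unet = nn.Linear(8, 8)          # placeholder for the UNet
vae = nn.Linear(8, 8)           # placeholder for the VAE
text_encoder = nn.Linear(8, 8)  # placeholder for the text encoder

# Freeze every base model; with LoRA, none of their original weights are trained.
unet.requires_grad_(False)
vae.requires_grad_(False)
text_encoder.requires_grad_(False)

# Inference-only models can be kept in half precision under mixed-precision training
# (hard-coded here for brevity; the script derives this from the accelerator config).
weight_dtype = torch.float16
vae.to(dtype=weight_dtype)
text_encoder.to(dtype=weight_dtype)

# Only the (hypothetical) LoRA parameters remain trainable, in full precision.
lora_params = [nn.Parameter(torch.zeros(8, 4))]
optimizer = torch.optim.AdamW(lora_params, lr=1e-4)

# The frozen text encoder no longer accumulates gradients or optimizer state.
assert not any(p.requires_grad for p in text_encoder.parameters())
```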