From c1dc2ae61968cccc3924784e4ed65ccfcd5c86eb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tolga=20Cang=C3=B6z?= <46008593+tolgacangoz@users.noreply.github.com>
Date: Wed, 17 Jul 2024 16:33:12 +0300
Subject: [PATCH] Fix multi-gpu case for `train_cm_ct_unconditional.py` (#8653)

* Fix multi-gpu case

* Prefer previously created `unwrap_model()` function

For `torch.compile()` generalizability

* `chore: update unwrap_model() function to use accelerator.unwrap_model()`

---
 .../consistency_training/train_cm_ct_unconditional.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/research_projects/consistency_training/train_cm_ct_unconditional.py b/examples/research_projects/consistency_training/train_cm_ct_unconditional.py
index b7a1e2a545..eccc539f23 100644
--- a/examples/research_projects/consistency_training/train_cm_ct_unconditional.py
+++ b/examples/research_projects/consistency_training/train_cm_ct_unconditional.py
@@ -1195,7 +1195,7 @@ def main(args):
 
                 # Resolve the c parameter for the Pseudo-Huber loss
                 if args.huber_c is None:
-                    args.huber_c = 0.00054 * args.resolution * math.sqrt(unet.config.in_channels)
+                    args.huber_c = 0.00054 * args.resolution * math.sqrt(unwrap_model(unet).config.in_channels)
 
                 # Get current number of discretization steps N according to our discretization curriculum
                 current_discretization_steps = get_discretization_steps(
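
For reference, a minimal sketch of the `unwrap_model()` helper the patched line relies on, assuming it follows the pattern the commit message describes (delegating to `accelerator.unwrap_model()` and also stripping a `torch.compile()` wrapper); the `Accelerator` instance and the `is_compiled_module` import are assumptions about the script's existing setup, not a verbatim copy of the file:

    from accelerate import Accelerator
    from diffusers.utils.torch_utils import is_compiled_module

    accelerator = Accelerator()

    def unwrap_model(model):
        # Strip the accelerate/DDP wrapper added when training on multiple GPUs,
        # so module attributes are accessed on the underlying model.
        model = accelerator.unwrap_model(model)
        # If the model was wrapped by torch.compile(), also unwrap it so that
        # attributes such as `config.in_channels` remain reachable.
        model = model._orig_mod if is_compiled_module(model) else model
        return model

Calling `unwrap_model(unet).config.in_channels` therefore works whether `unet` is the bare module, a DDP-wrapped module in the multi-GPU case, or a compiled module, which is why the patch prefers it over accessing `unet.config` directly.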