From 66ee73eebc41e733562502ae87e0c97943e6f589 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Wed, 29 Jun 2022 17:17:00 +0200 Subject: [PATCH 01/32] refactor up/down sample blocks in unet_rl --- src/diffusers/models/unet_rl.py | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 2dea6f2106..872e2340f9 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -6,7 +6,7 @@ import torch.nn as nn from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .embeddings import get_timestep_embedding -from .resnet import ResidualTemporalBlock +from .resnet import Downsample, ResidualTemporalBlock, Upsample class SinusoidalPosEmb(nn.Module): @@ -18,24 +18,6 @@ class SinusoidalPosEmb(nn.Module): return get_timestep_embedding(x, self.dim) -class Downsample1d(nn.Module): - def __init__(self, dim): - super().__init__() - self.conv = nn.Conv1d(dim, dim, 3, 2, 1) - - def forward(self, x): - return self.conv(x) - - -class Upsample1d(nn.Module): - def __init__(self, dim): - super().__init__() - self.conv = nn.ConvTranspose1d(dim, dim, 4, 2, 1) - - def forward(self, x): - return self.conv(x) - - class RearrangeDim(nn.Module): def __init__(self): super().__init__() @@ -114,7 +96,7 @@ class TemporalUNet(ModelMixin, ConfigMixin): # (nn.Module): [ ResidualTemporalBlock(dim_in, dim_out, embed_dim=time_dim, horizon=training_horizon), ResidualTemporalBlock(dim_out, dim_out, embed_dim=time_dim, horizon=training_horizon), - Downsample1d(dim_out) if not is_last else nn.Identity(), + Downsample(dim_out, use_conv=True, dims=1) if not is_last else nn.Identity(), ] ) ) @@ -134,7 +116,7 @@ class TemporalUNet(ModelMixin, ConfigMixin): # (nn.Module): [ ResidualTemporalBlock(dim_out * 2, dim_in, embed_dim=time_dim, horizon=training_horizon), ResidualTemporalBlock(dim_in, dim_in, embed_dim=time_dim, horizon=training_horizon), - Upsample1d(dim_in) if not is_last else nn.Identity(), + Upsample(dim_in, use_conv_transpose=True, dims=1) if not is_last else nn.Identity(), ] ) ) From 358531be9d858d41077c7e3ebe02a44df6261487 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 29 Jun 2022 17:30:35 +0000 Subject: [PATCH 02/32] up --- src/diffusers/models/resnet.py | 185 +++++++++++++++++++++++++++++-- src/diffusers/models/unet_ldm.py | 179 ++++++++++++++++-------------- 2 files changed, 271 insertions(+), 93 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 49c1564253..8972e58e5f 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -162,7 +162,7 @@ class Downsample(nn.Module): # RESNETS -# unet_glide.py & unet_ldm.py +# unet_glide.py class ResBlock(TimestepBlock): """ A residual block that can optionally change the number of channels. 
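The hunks below add an `overwrite` flag to `ResBlock`: it builds a second, flat set of modules (`norm1`, `conv1`, `temb_proj`, ...) and `set_weights` copies the trained parameters out of the GLIDE-style `in_layers`/`emb_layers`/`out_layers` Sequentials, so both layouts compute the same function. A minimal sketch of that porting pattern, using hypothetical channel sizes rather than code from this patch:

import torch
import torch.nn as nn

# GLIDE-style packed layout: norm -> SiLU -> conv inside one Sequential
in_layers = nn.Sequential(nn.GroupNorm(32, 64), nn.SiLU(), nn.Conv2d(64, 64, 3, padding=1))
# flat layout that a set_weights-style port targets
norm1 = nn.GroupNorm(32, 64)
conv1 = nn.Conv2d(64, 64, 3, padding=1)

with torch.no_grad():
    norm1.weight.copy_(in_layers[0].weight)
    norm1.bias.copy_(in_layers[0].bias)
    conv1.weight.copy_(in_layers[-1].weight)
    conv1.bias.copy_(in_layers[-1].bias)

x = torch.randn(1, 64, 8, 8)
# both layouts now run the identical norm -> SiLU -> conv computation
assert torch.allclose(in_layers(x), conv1(nn.SiLU()(norm1(x))))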
@@ -188,6 +188,7 @@ class ResBlock(TimestepBlock): use_checkpoint=False, up=False, down=False, + overwrite=False, # TODO(Patrick) - use for glide at later stage ): super().__init__() self.channels = channels @@ -236,6 +237,65 @@ class ResBlock(TimestepBlock): else: self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + self.overwrite = overwrite + self.is_overwritten = False + if self.overwrite: + in_channels = channels + out_channels = self.out_channels + conv_shortcut = False + dropout = 0.0 + temb_channels = emb_channels + groups = 32 + pre_norm = True + eps = 1e-5 + non_linearity = "silu" + self.pre_norm = pre_norm + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + if self.pre_norm: + self.norm1 = Normalize(in_channels, num_groups=groups, eps=eps) + else: + self.norm1 = Normalize(out_channels, num_groups=groups, eps=eps) + + self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + self.temb_proj = torch.nn.Linear(temb_channels, out_channels) + self.norm2 = Normalize(out_channels, num_groups=groups, eps=eps) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) + if non_linearity == "swish": + self.nonlinearity = nonlinearity + elif non_linearity == "mish": + self.nonlinearity = Mish() + elif non_linearity == "silu": + self.nonlinearity = nn.SiLU() + + if self.in_channels != self.out_channels: + self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + + def set_weights(self): + # TODO(Patrick): use for glide at later stage + self.norm1.weight.data = self.in_layers[0].weight.data + self.norm1.bias.data = self.in_layers[0].bias.data + + self.conv1.weight.data = self.in_layers[-1].weight.data + self.conv1.bias.data = self.in_layers[-1].bias.data + + self.temb_proj.weight.data = self.emb_layers[-1].weight.data + self.temb_proj.bias.data = self.emb_layers[-1].bias.data + + self.norm2.weight.data = self.out_layers[0].weight.data + self.norm2.bias.data = self.out_layers[0].bias.data + + self.conv2.weight.data = self.out_layers[-1].weight.data + self.conv2.bias.data = self.out_layers[-1].bias.data + + if self.in_channels != self.out_channels: + self.nin_shortcut.weight.data = self.skip_connection.weight.data + self.nin_shortcut.bias.data = self.skip_connection.bias.data + def forward(self, x, emb): """ Apply the block to a Tensor, conditioned on a timestep embedding. @@ -243,6 +303,10 @@ class ResBlock(TimestepBlock): :param x: an [N x C x ...] Tensor of features. :param emb: an [N x emb_channels] Tensor of timestep embeddings. :return: an [N x C x ...] Tensor of outputs. 
""" + if self.overwrite: + # TODO(Patrick): use for glide at later stage + self.set_weights() + if self.updown: in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] h = in_rest(x) @@ -251,6 +315,7 @@ class ResBlock(TimestepBlock): h = in_conv(h) else: h = self.in_layers(x) + emb_out = self.emb_layers(emb).type(h.dtype) while len(emb_out.shape) < len(h.shape): emb_out = emb_out[..., None] @@ -262,7 +327,50 @@ class ResBlock(TimestepBlock): else: h = h + emb_out h = self.out_layers(h) - return self.skip_connection(x) + h + + result = self.skip_connection(x) + h + +# TODO(Patrick) Use for glide at later stage +# result = self.forward_2(x, emb) + + return result + + def forward_2(self, x, temb, mask=1.0): + if self.overwrite and not self.is_overwritten: + self.set_weights() + self.is_overwritten = True + + h = x + if self.pre_norm: + h = self.norm1(h) + h = self.nonlinearity(h) + + h = self.conv1(h) + + if not self.pre_norm: + h = self.norm1(h) + h = self.nonlinearity(h) + + h = h + self.temb_proj(self.nonlinearity(temb))[:, :, None, None] + + if self.pre_norm: + h = self.norm2(h) + h = self.nonlinearity(h) + + h = self.dropout(h) + h = self.conv2(h) + + if not self.pre_norm: + h = self.norm2(h) + h = self.nonlinearity(h) + + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return x + h # unet.py and unet_grad_tts.py @@ -280,6 +388,7 @@ class ResnetBlock(nn.Module): eps=1e-6, non_linearity="swish", overwrite_for_grad_tts=False, + overwrite_for_ldm=False, ): super().__init__() self.pre_norm = pre_norm @@ -302,15 +411,19 @@ class ResnetBlock(nn.Module): self.nonlinearity = nonlinearity elif non_linearity == "mish": self.nonlinearity = Mish() + elif non_linearity == "silu": + self.nonlinearity = nn.SiLU() if self.in_channels != self.out_channels: if self.use_conv_shortcut: + # TODO(Patrick) - this branch is never used I think => can be deleted! 
self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) else: self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) self.is_overwritten = False self.overwrite_for_grad_tts = overwrite_for_grad_tts + self.overwrite_for_ldm = overwrite_for_ldm if self.overwrite_for_grad_tts: dim = in_channels dim_out = out_channels @@ -324,6 +437,39 @@ class ResnetBlock(nn.Module): self.res_conv = torch.nn.Conv2d(dim, dim_out, 1) else: self.res_conv = torch.nn.Identity() + elif self.overwrite_for_ldm: + dims = 2 +# eps = 1e-5 +# non_linearity = "silu" +# overwrite_for_ldm + channels = in_channels + emb_channels = temb_channels + use_scale_shift_norm = False + + self.in_layers = nn.Sequential( + normalization(channels, swish=1.0), + nn.Identity(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + self.emb_layers = nn.Sequential( + nn.SiLU(), + linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + ), + ) + self.out_layers = nn.Sequential( + normalization(self.out_channels, swish=0.0 if use_scale_shift_norm else 1.0), + nn.SiLU() if use_scale_shift_norm else nn.Identity(), + nn.Dropout(p=dropout), + zero_module(conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)), + ) + if self.out_channels == in_channels: + self.skip_connection = nn.Identity() +# elif use_conv: +# self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) def set_weights_grad_tts(self): self.conv1.weight.data = self.block1.block[0].weight.data @@ -343,13 +489,36 @@ class ResnetBlock(nn.Module): self.nin_shortcut.weight.data = self.res_conv.weight.data self.nin_shortcut.bias.data = self.res_conv.bias.data - def forward(self, x, temb, mask=None): + def set_weights_ldm(self): + self.norm1.weight.data = self.in_layers[0].weight.data + self.norm1.bias.data = self.in_layers[0].bias.data + + self.conv1.weight.data = self.in_layers[-1].weight.data + self.conv1.bias.data = self.in_layers[-1].bias.data + + self.temb_proj.weight.data = self.emb_layers[-1].weight.data + self.temb_proj.bias.data = self.emb_layers[-1].bias.data + + self.norm2.weight.data = self.out_layers[0].weight.data + self.norm2.bias.data = self.out_layers[0].bias.data + + self.conv2.weight.data = self.out_layers[-1].weight.data + self.conv2.bias.data = self.out_layers[-1].bias.data + + if self.in_channels != self.out_channels: + self.nin_shortcut.weight.data = self.skip_connection.weight.data + self.nin_shortcut.bias.data = self.skip_connection.bias.data + + def forward(self, x, temb, mask=1.0): if self.overwrite_for_grad_tts and not self.is_overwritten: self.set_weights_grad_tts() self.is_overwritten = True + elif self.overwrite_for_ldm and not self.is_overwritten: + self.set_weights_ldm() + self.is_overwritten = True h = x - h = h * mask if mask is not None else h + h = h * mask if self.pre_norm: h = self.norm1(h) h = self.nonlinearity(h) @@ -359,11 +528,11 @@ class ResnetBlock(nn.Module): if not self.pre_norm: h = self.norm1(h) h = self.nonlinearity(h) - h = h * mask if mask is not None else h + h = h * mask h = h + self.temb_proj(self.nonlinearity(temb))[:, :, None, None] - h = h * mask if mask is not None else h + h = h * mask if self.pre_norm: h = self.norm2(h) h = self.nonlinearity(h) @@ -374,9 +543,9 @@ class ResnetBlock(nn.Module): if not self.pre_norm: h = self.norm2(h) h = self.nonlinearity(h) - h = h * mask if mask 
is not None else h + h = h * mask - x = x * mask if mask is not None else x + x = x * mask if self.in_channels != self.out_channels: if self.use_conv_shortcut: x = self.conv_shortcut(x) diff --git a/src/diffusers/models/unet_ldm.py b/src/diffusers/models/unet_ldm.py index 0571013d9d..f78f3afd09 100644 --- a/src/diffusers/models/unet_ldm.py +++ b/src/diffusers/models/unet_ldm.py @@ -10,7 +10,9 @@ from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import get_timestep_embedding -from .resnet import Downsample, ResBlock, TimestepBlock, Upsample +from .resnet import Downsample, TimestepBlock, Upsample +from .resnet import ResnetBlock +#from .resnet import ResBlock def exists(val): @@ -364,7 +366,7 @@ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): def forward(self, x, emb, context=None): for layer in self: - if isinstance(layer, TimestepBlock): + if isinstance(layer, TimestepBlock) or isinstance(layer, ResnetBlock): x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context) @@ -559,14 +561,14 @@ class UNetLDMModel(ModelMixin, ConfigMixin): for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, + ResnetBlock( + in_channels=ch, + out_channels=mult * model_channels, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + overwrite_for_ldm=True, ) ] ch = mult * model_channels @@ -599,16 +601,17 @@ class UNetLDMModel(ModelMixin, ConfigMixin): out_ch = ch self.input_blocks.append( TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) +# ResBlock( +# ch, +# time_embed_dim, +# dropout, +# out_channels=out_ch, +# dims=dims, +# use_checkpoint=use_checkpoint, +# use_scale_shift_norm=use_scale_shift_norm, +# down=True, +# ) + None if resblock_updown else Downsample( ch, use_conv=conv_resample, dims=dims, out_channels=out_ch, padding=1, name="op" @@ -629,13 +632,14 @@ class UNetLDMModel(ModelMixin, ConfigMixin): # num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, + ResnetBlock( + in_channels=ch, + out_channels=None, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + overwrite_for_ldm=True, ), AttentionBlock( ch, @@ -646,13 +650,14 @@ class UNetLDMModel(ModelMixin, ConfigMixin): ) if not use_spatial_transformer else SpatialTransformer(ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, + ResnetBlock( + in_channels=ch, + out_channels=None, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + overwrite_for_ldm=True, ), ) self._feature_size += ch @@ -662,15 +667,15 @@ class UNetLDMModel(ModelMixin, ConfigMixin): for i in range(num_res_blocks + 1): ich = input_block_chans.pop() layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, + 
ResnetBlock( + in_channels=ch + ich, out_channels=model_channels * mult, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + overwrite_for_ldm=True, + ), ] ch = model_channels * mult if ds in attention_resolutions: @@ -698,16 +703,17 @@ class UNetLDMModel(ModelMixin, ConfigMixin): if level and i == num_res_blocks: out_ch = ch layers.append( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - up=True, - ) +# ResBlock( +# ch, +# time_embed_dim, +# dropout, +# out_channels=out_ch, +# dims=dims, +# use_checkpoint=use_checkpoint, +# use_scale_shift_norm=use_scale_shift_norm, +# up=True, +# ) + None if resblock_updown else Upsample(ch, use_conv=conv_resample, dims=dims, out_channels=out_ch) ) @@ -842,15 +848,15 @@ class EncoderUNetModel(nn.Module): for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) + ResnetBlock( + in_channels=ch, + out_channels=model_channels * mult, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + overwrite_for_ldm=True, + ), ] ch = mult * model_channels if ds in attention_resolutions: @@ -870,16 +876,17 @@ class EncoderUNetModel(nn.Module): out_ch = ch self.input_blocks.append( TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) +# ResBlock( +# ch, +# time_embed_dim, +# dropout, +# out_channels=out_ch, +# dims=dims, +# use_checkpoint=use_checkpoint, +# use_scale_shift_norm=use_scale_shift_norm, +# down=True, +# ) + None if resblock_updown else Downsample( ch, use_conv=conv_resample, dims=dims, out_channels=out_ch, padding=1, name="op" @@ -892,13 +899,14 @@ class EncoderUNetModel(nn.Module): self._feature_size += ch self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, + ResnetBlock( + in_channels=ch, + out_channels=None, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + overwrite_for_ldm=True, ), AttentionBlock( ch, @@ -907,13 +915,14 @@ class EncoderUNetModel(nn.Module): num_head_channels=num_head_channels, use_new_attention_order=use_new_attention_order, ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, + ResnetBlock( + in_channels=ch, + out_channels=None, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + overwrite_for_ldm=True, ), ) self._feature_size += ch From 26ce60c46d128b820674e99c847304d1e424b661 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 29 Jun 2022 17:30:48 +0000 Subject: [PATCH 03/32] up --- src/diffusers/models/resnet.py | 14 +++--- src/diffusers/models/unet_ldm.py | 83 ++++++++++++++++---------------- 2 files changed, 49 insertions(+), 48 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 8972e58e5f..93c0cf1782 100644 --- a/src/diffusers/models/resnet.py +++ 
b/src/diffusers/models/resnet.py @@ -330,8 +330,8 @@ class ResBlock(TimestepBlock): result = self.skip_connection(x) + h -# TODO(Patrick) Use for glide at later stage -# result = self.forward_2(x, emb) + # TODO(Patrick) Use for glide at later stage + # result = self.forward_2(x, emb) return result @@ -439,9 +439,9 @@ class ResnetBlock(nn.Module): self.res_conv = torch.nn.Identity() elif self.overwrite_for_ldm: dims = 2 -# eps = 1e-5 -# non_linearity = "silu" -# overwrite_for_ldm + # eps = 1e-5 + # non_linearity = "silu" + # overwrite_for_ldm channels = in_channels emb_channels = temb_channels use_scale_shift_norm = False @@ -466,8 +466,8 @@ class ResnetBlock(nn.Module): ) if self.out_channels == in_channels: self.skip_connection = nn.Identity() -# elif use_conv: -# self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1) + # elif use_conv: + # self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1) else: self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) diff --git a/src/diffusers/models/unet_ldm.py b/src/diffusers/models/unet_ldm.py index f78f3afd09..9c01f0d17e 100644 --- a/src/diffusers/models/unet_ldm.py +++ b/src/diffusers/models/unet_ldm.py @@ -10,9 +10,10 @@ from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import get_timestep_embedding -from .resnet import Downsample, TimestepBlock, Upsample -from .resnet import ResnetBlock -#from .resnet import ResBlock +from .resnet import Downsample, ResnetBlock, TimestepBlock, Upsample + + +# from .resnet import ResBlock def exists(val): @@ -561,14 +562,14 @@ class UNetLDMModel(ModelMixin, ConfigMixin): for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ - ResnetBlock( - in_channels=ch, - out_channels=mult * model_channels, - dropout=dropout, - temb_channels=time_embed_dim, - eps=1e-5, - non_linearity="silu", - overwrite_for_ldm=True, + ResnetBlock( + in_channels=ch, + out_channels=mult * model_channels, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + overwrite_for_ldm=True, ) ] ch = mult * model_channels @@ -601,16 +602,16 @@ class UNetLDMModel(ModelMixin, ConfigMixin): out_ch = ch self.input_blocks.append( TimestepEmbedSequential( -# ResBlock( -# ch, -# time_embed_dim, -# dropout, -# out_channels=out_ch, -# dims=dims, -# use_checkpoint=use_checkpoint, -# use_scale_shift_norm=use_scale_shift_norm, -# down=True, -# ) + # ResBlock( + # ch, + # time_embed_dim, + # dropout, + # out_channels=out_ch, + # dims=dims, + # use_checkpoint=use_checkpoint, + # use_scale_shift_norm=use_scale_shift_norm, + # down=True, + # ) None if resblock_updown else Downsample( @@ -703,16 +704,16 @@ class UNetLDMModel(ModelMixin, ConfigMixin): if level and i == num_res_blocks: out_ch = ch layers.append( -# ResBlock( -# ch, -# time_embed_dim, -# dropout, -# out_channels=out_ch, -# dims=dims, -# use_checkpoint=use_checkpoint, -# use_scale_shift_norm=use_scale_shift_norm, -# up=True, -# ) + # ResBlock( + # ch, + # time_embed_dim, + # dropout, + # out_channels=out_ch, + # dims=dims, + # use_checkpoint=use_checkpoint, + # use_scale_shift_norm=use_scale_shift_norm, + # up=True, + # ) None if resblock_updown else Upsample(ch, use_conv=conv_resample, dims=dims, out_channels=out_ch) @@ -876,16 +877,16 @@ class EncoderUNetModel(nn.Module): out_ch = ch self.input_blocks.append( TimestepEmbedSequential( -# ResBlock( -# ch, -# time_embed_dim, -# dropout, -# 
out_channels=out_ch, -# dims=dims, -# use_checkpoint=use_checkpoint, -# use_scale_shift_norm=use_scale_shift_norm, -# down=True, -# ) + # ResBlock( + # ch, + # time_embed_dim, + # dropout, + # out_channels=out_ch, + # dims=dims, + # use_checkpoint=use_checkpoint, + # use_scale_shift_norm=use_scale_shift_norm, + # down=True, + # ) None if resblock_updown else Downsample( From 21aac1aca905d8961d7f06d676d9e8a217f129c8 Mon Sep 17 00:00:00 2001 From: anton-l Date: Thu, 30 Jun 2022 10:21:37 +0200 Subject: [PATCH 04/32] fix setup --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 7ccbd630e0..21f607949e 100644 --- a/setup.py +++ b/setup.py @@ -88,7 +88,7 @@ _deps = [ "requests", "torch>=1.4", "tensorboard", - "modelcards=0.1.4" + "modelcards==0.1.4" ] # this is a lookup table with items like: From c9bd4d433845921ddf7c0b0a50be3c7bdf7a80fc Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 11:41:06 +0200 Subject: [PATCH 05/32] remove if fir from resent block and upsample, downsample for sde unet --- src/diffusers/models/resnet.py | 16 ++--- .../models/unet_sde_score_estimation.py | 72 +++++++------------ 2 files changed, 30 insertions(+), 58 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 93c0cf1782..ae6754b1d8 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -614,19 +614,11 @@ class ResnetBlockBigGANpp(nn.Module): h = self.act(self.GroupNorm_0(x)) if self.up: - if self.fir: - h = upsample_2d(h, self.fir_kernel, factor=2) - x = upsample_2d(x, self.fir_kernel, factor=2) - else: - h = naive_upsample_2d(h, factor=2) - x = naive_upsample_2d(x, factor=2) + h = upsample_2d(h, self.fir_kernel, factor=2) + x = upsample_2d(x, self.fir_kernel, factor=2) elif self.down: - if self.fir: - h = downsample_2d(h, self.fir_kernel, factor=2) - x = downsample_2d(x, self.fir_kernel, factor=2) - else: - h = naive_downsample_2d(h, factor=2) - x = naive_downsample_2d(x, factor=2) + h = downsample_2d(h, self.fir_kernel, factor=2) + x = downsample_2d(x, self.fir_kernel, factor=2) h = self.Conv_0(h) # Add bias to each feature map conditioned on the time embedding diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 8acf337268..508bac141b 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -417,20 +417,16 @@ class Upsample(nn.Module): def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False, fir_kernel=(1, 3, 3, 1)): super().__init__() out_ch = out_ch if out_ch else in_ch - if not fir: - if with_conv: - self.Conv_0 = conv3x3(in_ch, out_ch) - else: - if with_conv: - self.Conv2d_0 = Conv2d( - in_ch, - out_ch, - kernel=3, - up=True, - resample_kernel=fir_kernel, - use_bias=True, - kernel_init=default_init(), - ) + if with_conv: + self.Conv2d_0 = Conv2d( + in_ch, + out_ch, + kernel=3, + up=True, + resample_kernel=fir_kernel, + use_bias=True, + kernel_init=default_init(), + ) self.fir = fir self.with_conv = with_conv self.fir_kernel = fir_kernel @@ -438,15 +434,10 @@ class Upsample(nn.Module): def forward(self, x): B, C, H, W = x.shape - if not self.fir: - h = F.interpolate(x, (H * 2, W * 2), "nearest") - if self.with_conv: - h = self.Conv_0(h) + if not self.with_conv: + h = upsample_2d(x, self.fir_kernel, factor=2) else: - if not self.with_conv: - h = upsample_2d(x, self.fir_kernel, factor=2) - else: - h = self.Conv2d_0(x) + h = 
self.Conv2d_0(x) return h @@ -455,20 +446,16 @@ class Downsample(nn.Module): def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False, fir_kernel=(1, 3, 3, 1)): super().__init__() out_ch = out_ch if out_ch else in_ch - if not fir: - if with_conv: - self.Conv_0 = conv3x3(in_ch, out_ch, stride=2, padding=0) - else: - if with_conv: - self.Conv2d_0 = Conv2d( - in_ch, - out_ch, - kernel=3, - down=True, - resample_kernel=fir_kernel, - use_bias=True, - kernel_init=default_init(), - ) + if with_conv: + self.Conv2d_0 = Conv2d( + in_ch, + out_ch, + kernel=3, + down=True, + resample_kernel=fir_kernel, + use_bias=True, + kernel_init=default_init(), + ) self.fir = fir self.fir_kernel = fir_kernel self.with_conv = with_conv @@ -476,17 +463,10 @@ class Downsample(nn.Module): def forward(self, x): B, C, H, W = x.shape - if not self.fir: - if self.with_conv: - x = F.pad(x, (0, 1, 0, 1)) - x = self.Conv_0(x) - else: - x = F.avg_pool2d(x, 2, stride=2) + if not self.with_conv: + x = downsample_2d(x, self.fir_kernel, factor=2) else: - if not self.with_conv: - x = downsample_2d(x, self.fir_kernel, factor=2) - else: - x = self.Conv2d_0(x) + x = self.Conv2d_0(x) return x From 81e71447830ca32879c92d4d90f9d1646fe76f36 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 11:46:01 +0200 Subject: [PATCH 06/32] remove naive up/down sample --- src/diffusers/models/resnet.py | 13 ------------- src/diffusers/models/unet_sde_score_estimation.py | 13 ------------- 2 files changed, 26 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index ae6754b1d8..e55b83e962 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -957,19 +957,6 @@ def downsample_2d(x, k=None, factor=2, gain=1): return upfirdn2d(x, torch.tensor(k, device=x.device), down=factor, pad=((p + 1) // 2, p // 2)) -def naive_upsample_2d(x, factor=2): - _N, C, H, W = x.shape - x = torch.reshape(x, (-1, C, H, 1, W, 1)) - x = x.repeat(1, 1, 1, factor, 1, factor) - return torch.reshape(x, (-1, C, H * factor, W * factor)) - - -def naive_downsample_2d(x, factor=2): - _N, C, H, W = x.shape - x = torch.reshape(x, (-1, C, H // factor, factor, W // factor, factor)) - return torch.mean(x, dim=(3, 5)) - - class NIN(nn.Module): def __init__(self, in_dim, num_units, init_scale=0.1): super().__init__() diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 508bac141b..dda144457f 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -123,19 +123,6 @@ class Conv2d(nn.Module): return x -def naive_upsample_2d(x, factor=2): - _N, C, H, W = x.shape - x = torch.reshape(x, (-1, C, H, 1, W, 1)) - x = x.repeat(1, 1, 1, factor, 1, factor) - return torch.reshape(x, (-1, C, H * factor, W * factor)) - - -def naive_downsample_2d(x, factor=2): - _N, C, H, W = x.shape - x = torch.reshape(x, (-1, C, H // factor, factor, W // factor, factor)) - return torch.mean(x, dim=(3, 5)) - - def upsample_conv_2d(x, w, k=None, factor=2, gain=1): """Fused `upsample_2d()` followed by `tf.nn.conv2d()`. 
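For reference, the `naive_upsample_2d` deleted here was plain nearest-neighbor pixel repetition; the surviving `upsample_2d` runs the same factor-2 resize through the separable FIR kernel via `upfirdn2d`, smoothing instead of repeating. A standalone sketch of the removed behavior:

import torch

def naive_upsample_2d(x, factor=2):
    # the removed helper: repeat each pixel factor x factor times
    n, c, h, w = x.shape
    x = x.reshape(n, c, h, 1, w, 1).repeat(1, 1, 1, factor, 1, factor)
    return x.reshape(n, c, h * factor, w * factor)

x = torch.randn(1, 3, 8, 8)
print(naive_upsample_2d(x).shape)  # torch.Size([1, 3, 16, 16])
# upsample_2d(x, k=(1, 3, 3, 1), factor=2) from this file gives the same
# shape but low-pass filters; with k=None ([1, 1]) it reduces to this repeat.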
From 8830af116834e20c074bf29cedde084b4e515ae1 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 11:54:32 +0200 Subject: [PATCH 07/32] get rid ResnetBlockDDPMpp and related functions --- src/diffusers/models/resnet.py | 68 -------------- .../models/unet_sde_score_estimation.py | 88 ++++--------------- 2 files changed, 17 insertions(+), 139 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index e55b83e962..225828538c 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -637,62 +637,6 @@ class ResnetBlockBigGANpp(nn.Module): return (x + h) / np.sqrt(2.0) -# unet_score_estimation.py -class ResnetBlockDDPMpp(nn.Module): - """ResBlock adapted from DDPM.""" - - def __init__( - self, - act, - in_ch, - out_ch=None, - temb_dim=None, - conv_shortcut=False, - dropout=0.1, - skip_rescale=False, - init_scale=0.0, - ): - super().__init__() - out_ch = out_ch if out_ch else in_ch - self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6) - self.Conv_0 = conv3x3(in_ch, out_ch) - if temb_dim is not None: - self.Dense_0 = nn.Linear(temb_dim, out_ch) - self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape) - nn.init.zeros_(self.Dense_0.bias) - self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6) - self.Dropout_0 = nn.Dropout(dropout) - self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale) - if in_ch != out_ch: - if conv_shortcut: - self.Conv_2 = conv3x3(in_ch, out_ch) - else: - self.NIN_0 = NIN(in_ch, out_ch) - - self.skip_rescale = skip_rescale - self.act = act - self.out_ch = out_ch - self.conv_shortcut = conv_shortcut - - def forward(self, x, temb=None): - h = self.act(self.GroupNorm_0(x)) - h = self.Conv_0(h) - if temb is not None: - h += self.Dense_0(self.act(temb))[:, :, None, None] - h = self.act(self.GroupNorm_1(h)) - h = self.Dropout_0(h) - h = self.Conv_1(h) - if x.shape[1] != self.out_ch: - if self.conv_shortcut: - x = self.Conv_2(x) - else: - x = self.NIN_0(x) - if not self.skip_rescale: - return x + h - else: - return (x + h) / np.sqrt(2.0) - - # unet_rl.py class ResidualTemporalBlock(nn.Module): def __init__(self, inp_channels, out_channels, embed_dim, horizon, kernel_size=5): @@ -957,18 +901,6 @@ def downsample_2d(x, k=None, factor=2, gain=1): return upfirdn2d(x, torch.tensor(k, device=x.device), down=factor, pad=((p + 1) // 2, p // 2)) -class NIN(nn.Module): - def __init__(self, in_dim, num_units, init_scale=0.1): - super().__init__() - self.W = nn.Parameter(default_init(scale=init_scale)((in_dim, num_units)), requires_grad=True) - self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True) - - def forward(self, x): - x = x.permute(0, 2, 3, 1) - y = contract_inner(x, self.W) + self.b - return y.permute(0, 3, 1, 2) - - def _setup_kernel(k): k = np.asarray(k, dtype=np.float32) if k.ndim == 1: diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index dda144457f..890678f1c6 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -28,7 +28,7 @@ from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import GaussianFourierProjection, get_timestep_embedding -from .resnet import ResnetBlockBigGANpp, ResnetBlockDDPMpp +from .resnet import ResnetBlockBigGANpp def upfirdn2d(input, kernel, up=1, 
down=1, pad=(0, 0)): @@ -305,32 +305,6 @@ def conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1 return conv -def _einsum(a, b, c, x, y): - einsum_str = "{},{}->{}".format("".join(a), "".join(b), "".join(c)) - return torch.einsum(einsum_str, x, y) - - -def contract_inner(x, y): - """tensordot(x, y, 1).""" - x_chars = list(string.ascii_lowercase[: len(x.shape)]) - y_chars = list(string.ascii_lowercase[len(x.shape) : len(y.shape) + len(x.shape)]) - y_chars[0] = x_chars[-1] # first axis of y and last of x get summed - out_chars = x_chars[:-1] + y_chars[1:] - return _einsum(x_chars, y_chars, out_chars, x, y) - - -class NIN(nn.Module): - def __init__(self, in_dim, num_units, init_scale=0.1): - super().__init__() - self.W = nn.Parameter(default_init(scale=init_scale)((in_dim, num_units)), requires_grad=True) - self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True) - - def forward(self, x): - x = x.permute(0, 2, 3, 1) - y = contract_inner(x, self.W) + self.b - return y.permute(0, 3, 1, 2) - - def get_act(nonlinearity): """Get activation functions from the config file.""" @@ -575,30 +549,16 @@ class NCSNpp(ModelMixin, ConfigMixin): elif progressive_input == "residual": pyramid_downsample = functools.partial(Down_sample, fir=fir, fir_kernel=fir_kernel, with_conv=True) - if resblock_type == "ddpm": - ResnetBlock = functools.partial( - ResnetBlockDDPMpp, - act=act, - dropout=dropout, - init_scale=init_scale, - skip_rescale=skip_rescale, - temb_dim=nf * 4, - ) - - elif resblock_type == "biggan": - ResnetBlock = functools.partial( - ResnetBlockBigGANpp, - act=act, - dropout=dropout, - fir=fir, - fir_kernel=fir_kernel, - init_scale=init_scale, - skip_rescale=skip_rescale, - temb_dim=nf * 4, - ) - - else: - raise ValueError(f"resblock type {resblock_type} unrecognized.") + ResnetBlock = functools.partial( + ResnetBlockBigGANpp, + act=act, + dropout=dropout, + fir=fir, + fir_kernel=fir_kernel, + init_scale=init_scale, + skip_rescale=skip_rescale, + temb_dim=nf * 4, + ) # Downsampling block @@ -622,10 +582,7 @@ class NCSNpp(ModelMixin, ConfigMixin): hs_c.append(in_ch) if i_level != self.num_resolutions - 1: - if resblock_type == "ddpm": - modules.append(Downsample(in_ch=in_ch)) - else: - modules.append(ResnetBlock(down=True, in_ch=in_ch)) + modules.append(ResnetBlock(down=True, in_ch=in_ch)) if progressive_input == "input_skip": modules.append(combiner(dim1=input_pyramid_ch, dim2=in_ch)) @@ -678,10 +635,7 @@ class NCSNpp(ModelMixin, ConfigMixin): raise ValueError(f"{progressive} is not a valid name") if i_level != 0: - if resblock_type == "ddpm": - modules.append(Upsample(in_ch=in_ch)) - else: - modules.append(ResnetBlock(in_ch=in_ch, up=True)) + modules.append(ResnetBlock(in_ch=in_ch, up=True)) assert not hs_c @@ -741,12 +695,8 @@ class NCSNpp(ModelMixin, ConfigMixin): hs.append(h) if i_level != self.num_resolutions - 1: - if self.resblock_type == "ddpm": - h = modules[m_idx](hs[-1]) - m_idx += 1 - else: - h = modules[m_idx](hs[-1], temb) - m_idx += 1 + h = modules[m_idx](hs[-1], temb) + m_idx += 1 if self.progressive_input == "input_skip": input_pyramid = self.pyramid_downsample(input_pyramid) @@ -818,12 +768,8 @@ class NCSNpp(ModelMixin, ConfigMixin): raise ValueError(f"{self.progressive} is not a valid name") if i_level != 0: - if self.resblock_type == "ddpm": - h = modules[m_idx](h) - m_idx += 1 - else: - h = modules[m_idx](h, temb) - m_idx += 1 + h = modules[m_idx](h, temb) + m_idx += 1 assert not hs From b89700812254c0d6a68e848362a13ff15a28ade2 Mon Sep 17 
00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 12:01:27 +0200 Subject: [PATCH 08/32] more cleanup --- src/diffusers/models/resnet.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 225828538c..50a3e453b4 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -909,17 +909,3 @@ def _setup_kernel(k): assert k.ndim == 2 assert k.shape[0] == k.shape[1] return k - - -def contract_inner(x, y): - """tensordot(x, y, 1).""" - x_chars = list(string.ascii_lowercase[: len(x.shape)]) - y_chars = list(string.ascii_lowercase[len(x.shape) : len(y.shape) + len(x.shape)]) - y_chars[0] = x_chars[-1] # first axis of y and last of x get summed - out_chars = x_chars[:-1] + y_chars[1:] - return _einsum(x_chars, y_chars, out_chars, x, y) - - -def _einsum(a, b, c, x, y): - einsum_str = "{},{}->{}".format("".join(a), "".join(b), "".join(c)) - return torch.einsum(einsum_str, x, y) From ebe683432f1cd4703947ad5691dbc0aaededa7d2 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 12:20:49 +0200 Subject: [PATCH 09/32] cleanup conv1x1 and conv3x3 --- src/diffusers/models/resnet.py | 50 ++++----------- .../models/unet_sde_score_estimation.py | 62 ++++++------------- 2 files changed, 32 insertions(+), 80 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 50a3e453b4..c206859b70 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -593,17 +593,18 @@ class ResnetBlockBigGANpp(nn.Module): self.fir = fir self.fir_kernel = fir_kernel - self.Conv_0 = conv3x3(in_ch, out_ch) + self.Conv_0 = conv2d(in_ch, out_ch, kernel_size=3, padding=1) if temb_dim is not None: self.Dense_0 = nn.Linear(temb_dim, out_ch) - self.Dense_0.weight.data = default_init()(self.Dense_0.weight.shape) + self.Dense_0.weight.data = variance_scaling()(self.Dense_0.weight.shape) nn.init.zeros_(self.Dense_0.bias) self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6) self.Dropout_0 = nn.Dropout(dropout) - self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale) + self.Conv_1 = conv2d(out_ch, out_ch, init_scale=init_scale, kernel_size=3, padding=1) if in_ch != out_ch or up or down: - self.Conv_2 = conv1x1(in_ch, out_ch) + #1x1 convolution with DDPM initialization. 
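+            # applied to the skip branch in forward() so x matches h before the residual sum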
+ self.Conv_2 = conv2d(in_ch, out_ch, kernel_size=1, padding=0) self.skip_rescale = skip_rescale self.act = act @@ -754,32 +755,19 @@ class RearrangeDim(nn.Module): raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") -def conv1x1(in_planes, out_planes, stride=1, bias=True, init_scale=1.0, padding=0): - """1x1 convolution with DDPM initialization.""" - conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=padding, bias=bias) - conv.weight.data = default_init(init_scale)(conv.weight.data.shape) - nn.init.zeros_(conv.bias) - return conv - - -def conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1.0, padding=1): - """3x3 convolution with DDPM initialization.""" +def conv2d(in_planes, out_planes, kernel_size=3, stride=1, bias=True, init_scale=1.0, padding=1): + """nXn convolution with DDPM initialization.""" conv = nn.Conv2d( - in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias ) - conv.weight.data = default_init(init_scale)(conv.weight.data.shape) + conv.weight.data = variance_scaling(init_scale)(conv.weight.data.shape) nn.init.zeros_(conv.bias) return conv -def default_init(scale=1.0): - """The same initialization used in DDPM.""" - scale = 1e-10 if scale == 0 else scale - return variance_scaling(scale, "fan_avg", "uniform") - - -def variance_scaling(scale, mode, distribution, in_axis=1, out_axis=0, dtype=torch.float32, device="cpu"): +def variance_scaling(scale=1.0, in_axis=1, out_axis=0, dtype=torch.float32, device="cpu"): """Ported from JAX.""" + scale = 1e-10 if scale == 0 else scale def _compute_fans(shape, in_axis=1, out_axis=0): receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis] @@ -789,21 +777,9 @@ def variance_scaling(scale, mode, distribution, in_axis=1, out_axis=0, dtype=tor def init(shape, dtype=dtype, device=device): fan_in, fan_out = _compute_fans(shape, in_axis, out_axis) - if mode == "fan_in": - denominator = fan_in - elif mode == "fan_out": - denominator = fan_out - elif mode == "fan_avg": - denominator = (fan_in + fan_out) / 2 - else: - raise ValueError("invalid mode for variance scaling initializer: {}".format(mode)) + denominator = (fan_in + fan_out) / 2 variance = scale / denominator - if distribution == "normal": - return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(variance) - elif distribution == "uniform": - return (torch.rand(*shape, dtype=dtype, device=device) * 2.0 - 1.0) * np.sqrt(3 * variance) - else: - raise ValueError("invalid distribution for variance scaling initializer") + return (torch.rand(*shape, dtype=dtype, device=device) * 2.0 - 1.0) * np.sqrt(3 * variance) return init diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 890678f1c6..30db349395 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -287,20 +287,12 @@ def downsample_2d(x, k=None, factor=2, gain=1): return upfirdn2d(x, torch.tensor(k, device=x.device), down=factor, pad=((p + 1) // 2, p // 2)) -def conv1x1(in_planes, out_planes, stride=1, bias=True, init_scale=1.0, padding=0): - """1x1 convolution with DDPM initialization.""" - conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=padding, bias=bias) - conv.weight.data = default_init(init_scale)(conv.weight.data.shape) - 
nn.init.zeros_(conv.bias) - return conv - - -def conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1.0, padding=1): - """3x3 convolution with DDPM initialization.""" +def conv2d(in_planes, out_planes, kernel_size=3, stride=1, bias=True, init_scale=1.0, padding=1): + """nXn convolution with DDPM initialization.""" conv = nn.Conv2d( - in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias ) - conv.weight.data = default_init(init_scale)(conv.weight.data.shape) + conv.weight.data = variance_scaling(init_scale)(conv.weight.data.shape) nn.init.zeros_(conv.bias) return conv @@ -320,14 +312,9 @@ def get_act(nonlinearity): raise NotImplementedError("activation function does not exist!") -def default_init(scale=1.0): - """The same initialization used in DDPM.""" - scale = 1e-10 if scale == 0 else scale - return variance_scaling(scale, "fan_avg", "uniform") - - -def variance_scaling(scale, mode, distribution, in_axis=1, out_axis=0, dtype=torch.float32, device="cpu"): +def variance_scaling(scale=1.0, in_axis=1, out_axis=0, dtype=torch.float32, device="cpu"): """Ported from JAX.""" + scale = 1e-10 if scale == 0 else scale def _compute_fans(shape, in_axis=1, out_axis=0): receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis] @@ -337,21 +324,9 @@ def variance_scaling(scale, mode, distribution, in_axis=1, out_axis=0, dtype=tor def init(shape, dtype=dtype, device=device): fan_in, fan_out = _compute_fans(shape, in_axis, out_axis) - if mode == "fan_in": - denominator = fan_in - elif mode == "fan_out": - denominator = fan_out - elif mode == "fan_avg": - denominator = (fan_in + fan_out) / 2 - else: - raise ValueError("invalid mode for variance scaling initializer: {}".format(mode)) + denominator = (fan_in + fan_out) / 2 variance = scale / denominator - if distribution == "normal": - return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(variance) - elif distribution == "uniform": - return (torch.rand(*shape, dtype=dtype, device=device) * 2.0 - 1.0) * np.sqrt(3 * variance) - else: - raise ValueError("invalid distribution for variance scaling initializer") + return (torch.rand(*shape, dtype=dtype, device=device) * 2.0 - 1.0) * np.sqrt(3 * variance) return init @@ -361,7 +336,8 @@ class Combine(nn.Module): def __init__(self, dim1, dim2, method="cat"): super().__init__() - self.Conv_0 = conv1x1(dim1, dim2) + #1x1 convolution with DDPM initialization. 
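+        # projects x to dim2 channels before the 'cat'/'sum' combine in forward()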
+ self.Conv_0 = conv2d(dim1, dim2, kernel_size=1, padding=0) self.method = method def forward(self, x, y): @@ -386,7 +362,7 @@ class Upsample(nn.Module): up=True, resample_kernel=fir_kernel, use_bias=True, - kernel_init=default_init(), + kernel_init=variance_scaling(), ) self.fir = fir self.with_conv = with_conv @@ -415,7 +391,7 @@ class Downsample(nn.Module): down=True, resample_kernel=fir_kernel, use_bias=True, - kernel_init=default_init(), + kernel_init=variance_scaling(), ) self.fir = fir self.fir_kernel = fir_kernel @@ -528,10 +504,10 @@ class NCSNpp(ModelMixin, ConfigMixin): if conditional: modules.append(nn.Linear(embed_dim, nf * 4)) - modules[-1].weight.data = default_init()(modules[-1].weight.shape) + modules[-1].weight.data = variance_scaling()(modules[-1].weight.shape) nn.init.zeros_(modules[-1].bias) modules.append(nn.Linear(nf * 4, nf * 4)) - modules[-1].weight.data = default_init()(modules[-1].weight.shape) + modules[-1].weight.data = variance_scaling()(modules[-1].weight.shape) nn.init.zeros_(modules[-1].bias) AttnBlock = functools.partial(AttentionBlock, overwrite_linear=True, rescale_output_factor=math.sqrt(2.0)) @@ -566,7 +542,7 @@ class NCSNpp(ModelMixin, ConfigMixin): if progressive_input != "none": input_pyramid_ch = channels - modules.append(conv3x3(channels, nf)) + modules.append(conv2d(channels, nf, kernel_size=3, padding=1)) hs_c = [nf] in_ch = nf @@ -615,18 +591,18 @@ class NCSNpp(ModelMixin, ConfigMixin): if i_level == self.num_resolutions - 1: if progressive == "output_skip": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) - modules.append(conv3x3(in_ch, channels, init_scale=init_scale)) + modules.append(conv2d(in_ch, channels, init_scale=init_scale, kernel_size=3, padding=1)) pyramid_ch = channels elif progressive == "residual": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) - modules.append(conv3x3(in_ch, in_ch, bias=True)) + modules.append(conv2d(in_ch, in_ch, bias=True, kernel_size=3, padding=1)) pyramid_ch = in_ch else: raise ValueError(f"{progressive} is not a valid name.") else: if progressive == "output_skip": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) - modules.append(conv3x3(in_ch, channels, bias=True, init_scale=init_scale)) + modules.append(conv2d(in_ch, channels, bias=True, init_scale=init_scale, kernel_size=3, padding=1)) pyramid_ch = channels elif progressive == "residual": modules.append(pyramid_upsample(in_ch=pyramid_ch, out_ch=in_ch)) @@ -641,7 +617,7 @@ class NCSNpp(ModelMixin, ConfigMixin): if progressive != "output_skip": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) - modules.append(conv3x3(in_ch, channels, init_scale=init_scale)) + modules.append(conv2d(in_ch, channels, init_scale=init_scale)) self.all_modules = nn.ModuleList(modules) From 13ac40ed8ed33d75910b14575788c0eab0cbbe75 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 12:21:04 +0200 Subject: [PATCH 10/32] style --- src/diffusers/models/resnet.py | 6 ++---- src/diffusers/models/unet_sde_score_estimation.py | 10 +++++----- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index c206859b70..5cc5530625 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -603,7 +603,7 @@ class ResnetBlockBigGANpp(nn.Module): self.Dropout_0 = nn.Dropout(dropout) self.Conv_1 = conv2d(out_ch, out_ch, 
init_scale=init_scale, kernel_size=3, padding=1) if in_ch != out_ch or up or down: - #1x1 convolution with DDPM initialization. + # 1x1 convolution with DDPM initialization. self.Conv_2 = conv2d(in_ch, out_ch, kernel_size=1, padding=0) self.skip_rescale = skip_rescale @@ -757,9 +757,7 @@ class RearrangeDim(nn.Module): def conv2d(in_planes, out_planes, kernel_size=3, stride=1, bias=True, init_scale=1.0, padding=1): """nXn convolution with DDPM initialization.""" - conv = nn.Conv2d( - in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias - ) + conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) conv.weight.data = variance_scaling(init_scale)(conv.weight.data.shape) nn.init.zeros_(conv.bias) return conv diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 30db349395..6f909dcf3b 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -289,9 +289,7 @@ def downsample_2d(x, k=None, factor=2, gain=1): def conv2d(in_planes, out_planes, kernel_size=3, stride=1, bias=True, init_scale=1.0, padding=1): """nXn convolution with DDPM initialization.""" - conv = nn.Conv2d( - in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias - ) + conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) conv.weight.data = variance_scaling(init_scale)(conv.weight.data.shape) nn.init.zeros_(conv.bias) return conv @@ -336,7 +334,7 @@ class Combine(nn.Module): def __init__(self, dim1, dim2, method="cat"): super().__init__() - #1x1 convolution with DDPM initialization. + # 1x1 convolution with DDPM initialization. 
self.Conv_0 = conv2d(dim1, dim2, kernel_size=1, padding=0) self.method = method @@ -602,7 +600,9 @@ class NCSNpp(ModelMixin, ConfigMixin): else: if progressive == "output_skip": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) - modules.append(conv2d(in_ch, channels, bias=True, init_scale=init_scale, kernel_size=3, padding=1)) + modules.append( + conv2d(in_ch, channels, bias=True, init_scale=init_scale, kernel_size=3, padding=1) + ) pyramid_ch = channels elif progressive == "residual": modules.append(pyramid_upsample(in_ch=pyramid_ch, out_ch=in_ch)) From f1cb807496ab456c52b48574171aaa83902fab6d Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 12:24:47 +0200 Subject: [PATCH 11/32] remove get_act --- .../models/unet_sde_score_estimation.py | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 6f909dcf3b..6eed6791d0 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -295,21 +295,6 @@ def conv2d(in_planes, out_planes, kernel_size=3, stride=1, bias=True, init_scale return conv -def get_act(nonlinearity): - """Get activation functions from the config file.""" - - if nonlinearity.lower() == "elu": - return nn.ELU() - elif nonlinearity.lower() == "relu": - return nn.ReLU() - elif nonlinearity.lower() == "lrelu": - return nn.LeakyReLU(negative_slope=0.2) - elif nonlinearity.lower() == "swish": - return nn.SiLU() - else: - raise NotImplementedError("activation function does not exist!") - - def variance_scaling(scale=1.0, in_axis=1, out_axis=0, dtype=torch.float32, device="cpu"): """Ported from JAX.""" scale = 1e-10 if scale == 0 else scale @@ -467,7 +452,7 @@ class NCSNpp(ModelMixin, ConfigMixin): skip_rescale=skip_rescale, continuous=continuous, ) - self.act = act = get_act(nonlinearity) + self.act = act = nn.SiLU() self.nf = nf self.num_res_blocks = num_res_blocks From c50d997591d14dfa2030b015d2a5934add658b1d Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 12:29:45 +0200 Subject: [PATCH 12/32] remove unused args --- .../models/unet_sde_score_estimation.py | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 6eed6791d0..92b8ba9d45 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -396,10 +396,8 @@ class NCSNpp(ModelMixin, ConfigMixin): def __init__( self, - centered=False, image_size=1024, num_channels=3, - attention_type="ddpm", attn_resolutions=(16,), ch_mult=(1, 2, 4, 8, 16, 32, 32, 32), conditional=True, @@ -411,24 +409,19 @@ class NCSNpp(ModelMixin, ConfigMixin): fourier_scale=16, init_scale=0.0, nf=16, - nonlinearity="swish", - normalization="GroupNorm", num_res_blocks=1, progressive="output_skip", progressive_combine="sum", progressive_input="input_skip", resamp_with_conv=True, - resblock_type="biggan", scale_by_sigma=True, skip_rescale=True, continuous=True, ): super().__init__() self.register_to_config( - centered=centered, image_size=image_size, num_channels=num_channels, - attention_type=attention_type, attn_resolutions=attn_resolutions, ch_mult=ch_mult, conditional=conditional, @@ -440,14 +433,11 @@ class NCSNpp(ModelMixin, ConfigMixin): fourier_scale=fourier_scale, init_scale=init_scale, nf=nf, - 
nonlinearity=nonlinearity, - normalization=normalization, num_res_blocks=num_res_blocks, progressive=progressive, progressive_combine=progressive_combine, progressive_input=progressive_input, resamp_with_conv=resamp_with_conv, - resblock_type=resblock_type, scale_by_sigma=scale_by_sigma, skip_rescale=skip_rescale, continuous=continuous, @@ -462,7 +452,6 @@ class NCSNpp(ModelMixin, ConfigMixin): self.conditional = conditional self.skip_rescale = skip_rescale - self.resblock_type = resblock_type self.progressive = progressive self.progressive_input = progressive_input self.embedding_type = embedding_type @@ -633,9 +622,8 @@ class NCSNpp(ModelMixin, ConfigMixin): else: temb = None - if not self.config.centered: - # If input data is in [0, 1] - x = 2 * x - 1.0 + # If input data is in [0, 1] + x = 2 * x - 1.0 # Downsampling block input_pyramid = None From 663393e28a4dcc5ec753d6ed58e8c8afd942cc45 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 12:33:52 +0200 Subject: [PATCH 13/32] remove fir option --- src/diffusers/models/resnet.py | 2 -- .../models/unet_sde_score_estimation.py | 24 +++++++------------ 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 5cc5530625..80ccdd77f7 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -579,7 +579,6 @@ class ResnetBlockBigGANpp(nn.Module): up=False, down=False, dropout=0.1, - fir=False, fir_kernel=(1, 3, 3, 1), skip_rescale=True, init_scale=0.0, @@ -590,7 +589,6 @@ class ResnetBlockBigGANpp(nn.Module): self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6) self.up = up self.down = down - self.fir = fir self.fir_kernel = fir_kernel self.Conv_0 = conv2d(in_ch, out_ch, kernel_size=3, padding=1) diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 92b8ba9d45..f2f8821f32 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -334,7 +334,7 @@ class Combine(nn.Module): class Upsample(nn.Module): - def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False, fir_kernel=(1, 3, 3, 1)): + def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir_kernel=(1, 3, 3, 1)): super().__init__() out_ch = out_ch if out_ch else in_ch if with_conv: @@ -347,13 +347,11 @@ class Upsample(nn.Module): use_bias=True, kernel_init=variance_scaling(), ) - self.fir = fir self.with_conv = with_conv self.fir_kernel = fir_kernel self.out_ch = out_ch def forward(self, x): - B, C, H, W = x.shape if not self.with_conv: h = upsample_2d(x, self.fir_kernel, factor=2) else: @@ -363,7 +361,7 @@ class Upsample(nn.Module): class Downsample(nn.Module): - def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False, fir_kernel=(1, 3, 3, 1)): + def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir_kernel=(1, 3, 3, 1)): super().__init__() out_ch = out_ch if out_ch else in_ch if with_conv: @@ -376,13 +374,11 @@ class Downsample(nn.Module): use_bias=True, kernel_init=variance_scaling(), ) - self.fir = fir self.fir_kernel = fir_kernel self.with_conv = with_conv self.out_ch = out_ch def forward(self, x): - B, C, H, W = x.shape if not self.with_conv: x = downsample_2d(x, self.fir_kernel, factor=2) else: @@ -404,7 +400,7 @@ class NCSNpp(ModelMixin, ConfigMixin): conv_size=3, dropout=0.0, embedding_type="fourier", - fir=True, + fir=True, # TODO (patil-suraj) remove this 
option from here and pre-trained model configs fir_kernel=(1, 3, 3, 1), fourier_scale=16, init_scale=0.0, @@ -428,7 +424,6 @@ class NCSNpp(ModelMixin, ConfigMixin): conv_size=conv_size, dropout=dropout, embedding_type=embedding_type, - fir=fir, fir_kernel=fir_kernel, fourier_scale=fourier_scale, init_scale=init_scale, @@ -483,25 +478,24 @@ class NCSNpp(ModelMixin, ConfigMixin): nn.init.zeros_(modules[-1].bias) AttnBlock = functools.partial(AttentionBlock, overwrite_linear=True, rescale_output_factor=math.sqrt(2.0)) - Up_sample = functools.partial(Upsample, with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel) + Up_sample = functools.partial(Upsample, with_conv=resamp_with_conv, fir_kernel=fir_kernel) if progressive == "output_skip": - self.pyramid_upsample = Up_sample(fir=fir, fir_kernel=fir_kernel, with_conv=False) + self.pyramid_upsample = Up_sample(fir_kernel=fir_kernel, with_conv=False) elif progressive == "residual": - pyramid_upsample = functools.partial(Up_sample, fir=fir, fir_kernel=fir_kernel, with_conv=True) + pyramid_upsample = functools.partial(Up_sample, fir_kernel=fir_kernel, with_conv=True) - Down_sample = functools.partial(Downsample, with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel) + Down_sample = functools.partial(Downsample, with_conv=resamp_with_conv, fir_kernel=fir_kernel) if progressive_input == "input_skip": - self.pyramid_downsample = Down_sample(fir=fir, fir_kernel=fir_kernel, with_conv=False) + self.pyramid_downsample = Down_sample(fir_kernel=fir_kernel, with_conv=False) elif progressive_input == "residual": - pyramid_downsample = functools.partial(Down_sample, fir=fir, fir_kernel=fir_kernel, with_conv=True) + pyramid_downsample = functools.partial(Down_sample, fir_kernel=fir_kernel, with_conv=True) ResnetBlock = functools.partial( ResnetBlockBigGANpp, act=act, dropout=dropout, - fir=fir, fir_kernel=fir_kernel, init_scale=init_scale, skip_rescale=skip_rescale, From 639b8611293c7098ea0e15f5390dd8e51bf1f54e Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 13:18:09 +0200 Subject: [PATCH 14/32] get rid of the custom conv2d layer for up/down sampling --- .../models/unet_sde_score_estimation.py | 200 ++---------------- 1 file changed, 20 insertions(+), 180 deletions(-) diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index f2f8821f32..5a67b1ffb1 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -17,7 +17,6 @@ import functools import math -import string import numpy as np import torch @@ -28,99 +27,21 @@ from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import GaussianFourierProjection, get_timestep_embedding -from .resnet import ResnetBlockBigGANpp +from .resnet import ResnetBlockBigGANpp, downsample_2d, upfirdn2d, upsample_2d -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) +def _setup_kernel(k): + k = np.asarray(k, dtype=np.float32) + if k.ndim == 1: + k = np.outer(k, k) + k /= np.sum(k) + assert k.ndim == 2 + assert k.shape[0] == k.shape[1] + return k -def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out 
= input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) - - -# Function ported from StyleGAN2 -def get_weight(module, shape, weight_var="weight", kernel_init=None): - """Get/create weight tensor for a convolution or fully-connected layer.""" - - return module.param(weight_var, kernel_init, shape) - - -class Conv2d(nn.Module): - """Conv2d layer with optimal upsampling and downsampling (StyleGAN2).""" - - def __init__( - self, - in_ch, - out_ch, - kernel, - up=False, - down=False, - resample_kernel=(1, 3, 3, 1), - use_bias=True, - kernel_init=None, - ): - super().__init__() - assert not (up and down) - assert kernel >= 1 and kernel % 2 == 1 - self.weight = nn.Parameter(torch.zeros(out_ch, in_ch, kernel, kernel)) - if kernel_init is not None: - self.weight.data = kernel_init(self.weight.data.shape) - if use_bias: - self.bias = nn.Parameter(torch.zeros(out_ch)) - - self.up = up - self.down = down - self.resample_kernel = resample_kernel - self.kernel = kernel - self.use_bias = use_bias - - def forward(self, x): - if self.up: - x = upsample_conv_2d(x, self.weight, k=self.resample_kernel) - elif self.down: - x = conv_downsample_2d(x, self.weight, k=self.resample_kernel) - else: - x = F.conv2d(x, self.weight, stride=1, padding=self.kernel // 2) - - if self.use_bias: - x = x + self.bias.reshape(1, -1, 1, 1) - - return x +def _shape(x, dim): + return x.shape[dim] def upsample_conv_2d(x, w, k=None, factor=2, gain=1): @@ -222,71 +143,6 @@ def conv_downsample_2d(x, w, k=None, factor=2, gain=1): return F.conv2d(x, w, stride=s, padding=0) -def _setup_kernel(k): - k = np.asarray(k, dtype=np.float32) - if k.ndim == 1: - k = np.outer(k, k) - k /= np.sum(k) - assert k.ndim == 2 - assert k.shape[0] == k.shape[1] - return k - - -def _shape(x, dim): - return x.shape[dim] - - -def upsample_2d(x, k=None, factor=2, gain=1): - r"""Upsample a batch of 2D images with the given filter. - - Args: - Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given - filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified - `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is a: - multiple of the upsampling factor. - x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, - C]`. - k: FIR filter of the shape `[firH, firW]` or `[firN]` - (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. - factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). 
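# Usage sketch for the upsample_2d/downsample_2d pair now imported from
# .resnet above. With k=None the defaults reduce to nearest-neighbor
# upsampling and 2x2 average pooling, so a round trip recovers the input
# exactly; this assumes the functions behave as their docstrings state.
import torch

x = torch.arange(4.0).reshape(1, 1, 2, 2)    # [N, C, H, W]
y = upsample_2d(x, k=None, factor=2)         # [1, 1, 4, 4]; each pixel repeated 2x2
x_back = downsample_2d(y, k=None, factor=2)  # [1, 1, 2, 2]; 2x2 block averages
assert torch.allclose(x_back, x)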
- - Returns: - Tensor of the shape `[N, C, H * factor, W * factor]` - """ - assert isinstance(factor, int) and factor >= 1 - if k is None: - k = [1] * factor - k = _setup_kernel(k) * (gain * (factor**2)) - p = k.shape[0] - factor - return upfirdn2d(x, torch.tensor(k, device=x.device), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)) - - -def downsample_2d(x, k=None, factor=2, gain=1): - r"""Downsample a batch of 2D images with the given filter. - - Args: - Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the - given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the - specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its - shape is a multiple of the downsampling factor. - x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, - C]`. - k: FIR filter of the shape `[firH, firW]` or `[firN]` - (separable). The default is `[1] * factor`, which corresponds to average pooling. - factor: Integer downsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). - - Returns: - Tensor of the shape `[N, C, H // factor, W // factor]` - """ - - assert isinstance(factor, int) and factor >= 1 - if k is None: - k = [1] * factor - k = _setup_kernel(k) * gain - p = k.shape[0] - factor - return upfirdn2d(x, torch.tensor(k, device=x.device), down=factor, pad=((p + 1) // 2, p // 2)) - - def conv2d(in_planes, out_planes, kernel_size=3, stride=1, bias=True, init_scale=1.0, padding=1): """nXn convolution with DDPM initialization.""" conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) @@ -338,24 +194,16 @@ class Upsample(nn.Module): super().__init__() out_ch = out_ch if out_ch else in_ch if with_conv: - self.Conv2d_0 = Conv2d( - in_ch, - out_ch, - kernel=3, - up=True, - resample_kernel=fir_kernel, - use_bias=True, - kernel_init=variance_scaling(), - ) + self.Conv2d_0 = conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1) self.with_conv = with_conv self.fir_kernel = fir_kernel self.out_ch = out_ch def forward(self, x): - if not self.with_conv: - h = upsample_2d(x, self.fir_kernel, factor=2) + if self.with_conv: + h = upsample_conv_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) else: - h = self.Conv2d_0(x) + h = upsample_2d(x, self.fir_kernel, factor=2) return h @@ -365,24 +213,16 @@ class Downsample(nn.Module): super().__init__() out_ch = out_ch if out_ch else in_ch if with_conv: - self.Conv2d_0 = Conv2d( - in_ch, - out_ch, - kernel=3, - down=True, - resample_kernel=fir_kernel, - use_bias=True, - kernel_init=variance_scaling(), - ) + self.Conv2d_0 = self.Conv2d_0 = conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1) self.fir_kernel = fir_kernel self.with_conv = with_conv self.out_ch = out_ch def forward(self, x): - if not self.with_conv: - x = downsample_2d(x, self.fir_kernel, factor=2) + if self.with_conv: + x = conv_downsample_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) else: - x = self.Conv2d_0(x) + x = downsample_2d(x, self.fir_kernel, factor=2) return x @@ -400,7 +240,7 @@ class NCSNpp(ModelMixin, ConfigMixin): conv_size=3, dropout=0.0, embedding_type="fourier", - fir=True, # TODO (patil-suraj) remove this option from here and pre-trained model configs + fir=True, # TODO (patil-suraj) remove this option from here and pre-trained model configs fir_kernel=(1, 3, 3, 1), fourier_scale=16, init_scale=0.0, From 
3e2cff4da25642e964c48fa44d7c00d3314b1ce8 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 13:26:05 +0200 Subject: [PATCH 15/32] better names and more cleanup --- .../models/unet_sde_score_estimation.py | 79 ++++++++----------- 1 file changed, 33 insertions(+), 46 deletions(-) diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 5a67b1ffb1..48e25bea7d 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -40,12 +40,8 @@ def _setup_kernel(k): return k -def _shape(x, dim): - return x.shape[dim] - - -def upsample_conv_2d(x, w, k=None, factor=2, gain=1): - """Fused `upsample_2d()` followed by `tf.nn.conv2d()`. +def _upsample_conv_2d(x, w, k=None, factor=2, gain=1): + """Fused `upsample_2d()` followed by `Conv2d()`. Args: Padding is performed only once at the beginning, not between the operations. The fused op is considerably more @@ -84,13 +80,13 @@ def upsample_conv_2d(x, w, k=None, factor=2, gain=1): # Determine data dimensions. stride = [1, 1, factor, factor] - output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW) + output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW) output_padding = ( - output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH, - output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW, + output_shape[0] - (x.shape[2] - 1) * stride[0] - convH, + output_shape[1] - (x.shape[3] - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 - num_groups = _shape(x, 1) // inC + num_groups = x.shape[1] // inC # Transpose weights. w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) @@ -98,21 +94,12 @@ def upsample_conv_2d(x, w, k=None, factor=2, gain=1): w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0) - # Original TF code. - # x = tf.nn.conv2d_transpose( - # x, - # w, - # output_shape=output_shape, - # strides=stride, - # padding='VALID', - # data_format=data_format) - # JAX equivalent return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) -def conv_downsample_2d(x, w, k=None, factor=2, gain=1): - """Fused `tf.nn.conv2d()` followed by `downsample_2d()`. +def _conv_downsample_2d(x, w, k=None, factor=2, gain=1): + """Fused `Conv2d()` followed by `downsample_2d()`. Args: Padding is performed only once at the beginning, not between the operations. 
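# Shape bookkeeping for the fused ops above, spelled out once. With factor 2
# and a 3x3 weight, the transposed convolution in `_upsample_conv_2d` yields
# (H - 1) * 2 + 3 pixels per side, and the trailing upfirdn2d pass pads/crops
# that back to exactly H * 2, matching the `upsample_2d` docstring.
H, factor, convH = 16, 2, 3
pre_fir = (H - 1) * factor + convH   # 33 pixels per side before the FIR pass
assert pre_fir == 33 and H * factor == 32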
The fused op is considerably more @@ -143,15 +130,7 @@ def conv_downsample_2d(x, w, k=None, factor=2, gain=1): return F.conv2d(x, w, stride=s, padding=0) -def conv2d(in_planes, out_planes, kernel_size=3, stride=1, bias=True, init_scale=1.0, padding=1): - """nXn convolution with DDPM initialization.""" - conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) - conv.weight.data = variance_scaling(init_scale)(conv.weight.data.shape) - nn.init.zeros_(conv.bias) - return conv - - -def variance_scaling(scale=1.0, in_axis=1, out_axis=0, dtype=torch.float32, device="cpu"): +def _variance_scaling(scale=1.0, in_axis=1, out_axis=0, dtype=torch.float32, device="cpu"): """Ported from JAX.""" scale = 1e-10 if scale == 0 else scale @@ -170,13 +149,21 @@ def variance_scaling(scale=1.0, in_axis=1, out_axis=0, dtype=torch.float32, devi return init +def Conv2d(in_planes, out_planes, kernel_size=3, stride=1, bias=True, init_scale=1.0, padding=1): + """nXn convolution with DDPM initialization.""" + conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) + conv.weight.data = _variance_scaling(init_scale)(conv.weight.data.shape) + nn.init.zeros_(conv.bias) + return conv + + class Combine(nn.Module): """Combine information from skip connections.""" def __init__(self, dim1, dim2, method="cat"): super().__init__() # 1x1 convolution with DDPM initialization. - self.Conv_0 = conv2d(dim1, dim2, kernel_size=1, padding=0) + self.Conv_0 = Conv2d(dim1, dim2, kernel_size=1, padding=0) self.method = method def forward(self, x, y): @@ -189,38 +176,38 @@ class Combine(nn.Module): raise ValueError(f"Method {self.method} not recognized.") -class Upsample(nn.Module): +class FirUpsample(nn.Module): def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir_kernel=(1, 3, 3, 1)): super().__init__() out_ch = out_ch if out_ch else in_ch if with_conv: - self.Conv2d_0 = conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1) + self.Conv2d_0 = Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1) self.with_conv = with_conv self.fir_kernel = fir_kernel self.out_ch = out_ch def forward(self, x): if self.with_conv: - h = upsample_conv_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) + h = _upsample_conv_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) else: h = upsample_2d(x, self.fir_kernel, factor=2) return h -class Downsample(nn.Module): +class FirDownsample(nn.Module): def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir_kernel=(1, 3, 3, 1)): super().__init__() out_ch = out_ch if out_ch else in_ch if with_conv: - self.Conv2d_0 = self.Conv2d_0 = conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1) + self.Conv2d_0 = self.Conv2d_0 = Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1) self.fir_kernel = fir_kernel self.with_conv = with_conv self.out_ch = out_ch def forward(self, x): if self.with_conv: - x = conv_downsample_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) + x = _conv_downsample_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) else: x = downsample_2d(x, self.fir_kernel, factor=2) @@ -311,21 +298,21 @@ class NCSNpp(ModelMixin, ConfigMixin): if conditional: modules.append(nn.Linear(embed_dim, nf * 4)) - modules[-1].weight.data = variance_scaling()(modules[-1].weight.shape) + modules[-1].weight.data = _variance_scaling()(modules[-1].weight.shape) nn.init.zeros_(modules[-1].bias) modules.append(nn.Linear(nf * 4, nf * 4)) - modules[-1].weight.data = variance_scaling()(modules[-1].weight.shape) + 
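# How the DDPM-style initializers above are meant to be used, as a minimal
# sketch (assumes `Conv2d` and `_variance_scaling` as defined in this file).
# `_variance_scaling(scale)` returns an `init(shape)` callable; an
# `init_scale` of 0.0 is clamped to 1e-10, which makes the final output
# convolutions start essentially at zero.
proj_out = Conv2d(128, 3, kernel_size=3, padding=1, init_scale=0.0)
assert proj_out.weight.abs().max() < 1e-4  # near-zero weight init
assert proj_out.bias.abs().sum() == 0.0    # biases are zeroed explicitly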
modules[-1].weight.data = _variance_scaling()(modules[-1].weight.shape) nn.init.zeros_(modules[-1].bias) AttnBlock = functools.partial(AttentionBlock, overwrite_linear=True, rescale_output_factor=math.sqrt(2.0)) - Up_sample = functools.partial(Upsample, with_conv=resamp_with_conv, fir_kernel=fir_kernel) + Up_sample = functools.partial(FirUpsample, with_conv=resamp_with_conv, fir_kernel=fir_kernel) if progressive == "output_skip": self.pyramid_upsample = Up_sample(fir_kernel=fir_kernel, with_conv=False) elif progressive == "residual": pyramid_upsample = functools.partial(Up_sample, fir_kernel=fir_kernel, with_conv=True) - Down_sample = functools.partial(Downsample, with_conv=resamp_with_conv, fir_kernel=fir_kernel) + Down_sample = functools.partial(FirDownsample, with_conv=resamp_with_conv, fir_kernel=fir_kernel) if progressive_input == "input_skip": self.pyramid_downsample = Down_sample(fir_kernel=fir_kernel, with_conv=False) @@ -348,7 +335,7 @@ class NCSNpp(ModelMixin, ConfigMixin): if progressive_input != "none": input_pyramid_ch = channels - modules.append(conv2d(channels, nf, kernel_size=3, padding=1)) + modules.append(Conv2d(channels, nf, kernel_size=3, padding=1)) hs_c = [nf] in_ch = nf @@ -397,11 +384,11 @@ class NCSNpp(ModelMixin, ConfigMixin): if i_level == self.num_resolutions - 1: if progressive == "output_skip": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) - modules.append(conv2d(in_ch, channels, init_scale=init_scale, kernel_size=3, padding=1)) + modules.append(Conv2d(in_ch, channels, init_scale=init_scale, kernel_size=3, padding=1)) pyramid_ch = channels elif progressive == "residual": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) - modules.append(conv2d(in_ch, in_ch, bias=True, kernel_size=3, padding=1)) + modules.append(Conv2d(in_ch, in_ch, bias=True, kernel_size=3, padding=1)) pyramid_ch = in_ch else: raise ValueError(f"{progressive} is not a valid name.") @@ -409,7 +396,7 @@ class NCSNpp(ModelMixin, ConfigMixin): if progressive == "output_skip": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) modules.append( - conv2d(in_ch, channels, bias=True, init_scale=init_scale, kernel_size=3, padding=1) + Conv2d(in_ch, channels, bias=True, init_scale=init_scale, kernel_size=3, padding=1) ) pyramid_ch = channels elif progressive == "residual": @@ -425,7 +412,7 @@ class NCSNpp(ModelMixin, ConfigMixin): if progressive != "output_skip": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) - modules.append(conv2d(in_ch, channels, init_scale=init_scale)) + modules.append(Conv2d(in_ch, channels, init_scale=init_scale)) self.all_modules = nn.ModuleList(modules) From f35387b33f95bc0bce367816653dd3a388cf501f Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 13:31:47 +0200 Subject: [PATCH 16/32] clean Linear --- .../models/unet_sde_score_estimation.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 48e25bea7d..9c82e53e70 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -157,6 +157,13 @@ def Conv2d(in_planes, out_planes, kernel_size=3, stride=1, bias=True, init_scale return conv +def Linear(dim_in, dim_out): + linear = nn.Linear(dim_in, dim_out) + linear.weight.data = 
_variance_scaling()(linear.weight.shape) + nn.init.zeros_(linear.bias) + return linear + + class Combine(nn.Module): """Combine information from skip connections.""" @@ -296,13 +303,8 @@ class NCSNpp(ModelMixin, ConfigMixin): else: raise ValueError(f"embedding type {embedding_type} unknown.") - if conditional: - modules.append(nn.Linear(embed_dim, nf * 4)) - modules[-1].weight.data = _variance_scaling()(modules[-1].weight.shape) - nn.init.zeros_(modules[-1].bias) - modules.append(nn.Linear(nf * 4, nf * 4)) - modules[-1].weight.data = _variance_scaling()(modules[-1].weight.shape) - nn.init.zeros_(modules[-1].bias) + modules.append(Linear(embed_dim, nf * 4)) + modules.append(Linear(nf * 4, nf * 4)) AttnBlock = functools.partial(AttentionBlock, overwrite_linear=True, rescale_output_factor=math.sqrt(2.0)) Up_sample = functools.partial(FirUpsample, with_conv=resamp_with_conv, fir_kernel=fir_kernel) From c54f36f0872f0cf131a0880cb80b69cc9ad8f346 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Thu, 30 Jun 2022 13:52:16 +0200 Subject: [PATCH 17/32] style --- src/diffusers/dependency_versions_table.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/diffusers/dependency_versions_table.py b/src/diffusers/dependency_versions_table.py index 833f726179..7ed796a1fe 100644 --- a/src/diffusers/dependency_versions_table.py +++ b/src/diffusers/dependency_versions_table.py @@ -14,4 +14,5 @@ deps = { "requests": "requests", "torch": "torch>=1.4", "tensorboard": "tensorboard", + "modelcards": "modelcards==0.1.4", } From 3dbd6a8f4d7f3f27df3e3433fc2ab9fe1e7a873d Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 30 Jun 2022 14:54:31 +0000 Subject: [PATCH 18/32] up --- src/diffusers/models/resnet.py | 3 +++ tests/test_modeling_utils.py | 22 ++++++++++------------ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 80ccdd77f7..5875628352 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -207,6 +207,9 @@ class ResBlock(TimestepBlock): self.updown = up or down +# if self.updown: +# import ipdb; ipdb.set_trace() + if up: self.h_upd = Upsample(channels, use_conv=False, dims=dims) self.x_upd = Upsample(channels, use_conv=False, dims=dims) diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index 743bef0e0e..08d70ca58c 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -259,7 +259,7 @@ class UnetModelTests(ModelTesterMixin, unittest.TestCase): # fmt: off expected_output_slice = torch.tensor([0.2891, -0.1899, 0.2595, -0.6214, 0.0968, -0.2622, 0.4688, 0.1311, 0.0053]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) class GlideSuperResUNetTests(ModelTesterMixin, unittest.TestCase): @@ -607,7 +607,7 @@ class UNetGradTTSModelTests(ModelTesterMixin, unittest.TestCase): expected_output_slice = torch.tensor([-0.0690, -0.0531, 0.0633, -0.0660, -0.0541, 0.0650, -0.0656, -0.0555, 0.0617]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) class TemporalUNetModelTests(ModelTesterMixin, unittest.TestCase): @@ -678,7 +678,7 @@ class TemporalUNetModelTests(ModelTesterMixin, unittest.TestCase): expected_output_slice = torch.tensor([-0.2714, 0.1042, -0.0794, -0.2820, 0.0803, -0.0811, -0.2345, 0.0580, -0.0584]) # 
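# Note on the atol -> rtol switch in the assertions above: torch.allclose
# checks |a - b| <= atol + rtol * |b|, so a relative tolerance scales with
# the magnitude of the expected values. Tiny self-contained illustration:
import torch

a, b = torch.tensor([1000.0]), torch.tensor([1000.5])
assert torch.allclose(a, b, rtol=1e-3)                 # 0.5 <= 1e-3 * 1000.5 (plus default atol)
assert not torch.allclose(a, b, rtol=0.0, atol=1e-3)   # 0.5 > 1e-3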
fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): @@ -753,7 +753,7 @@ class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): expected_output_slice = torch.tensor([3.1909e-07, -8.5393e-08, 4.8460e-07, -4.5550e-07, -1.3205e-06, -6.3475e-07, 9.7837e-07, 2.9974e-07, 1.2345e-06]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) def test_output_pretrained_ve_large(self): model = NCSNpp.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy") @@ -779,7 +779,7 @@ class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): expected_output_slice = torch.tensor([-8.3299e-07, -9.0431e-07, 4.0585e-08, 9.7563e-07, 1.0280e-06, 1.0133e-06, 1.4979e-06, -2.9716e-07, -6.1817e-07]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) def test_output_pretrained_vp(self): model = NCSNpp.from_pretrained("fusing/ddpm-cifar10-vp-dummy") @@ -805,7 +805,7 @@ class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): expected_output_slice = torch.tensor([-3.9086e-07, -1.1001e-05, 1.8881e-06, 1.1106e-05, 1.6629e-06, 2.9820e-06, 8.4978e-06, 8.0253e-07, 1.5435e-06]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) class VQModelTests(ModelTesterMixin, unittest.TestCase): @@ -878,10 +878,9 @@ class VQModelTests(ModelTesterMixin, unittest.TestCase): output_slice = output[0, -1, -3:, -3:].flatten() # fmt: off - expected_output_slice = torch.tensor([-1.1321, 0.1056, 0.3505, -0.6461, -0.2014, 0.0419, -0.5763, -0.8462, - -0.4218]) + expected_output_slice = torch.tensor([-1.1321, 0.1056, 0.3505, -0.6461, -0.2014, 0.0419, -0.5763, -0.8462, -0.4218]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) class AutoEncoderKLTests(ModelTesterMixin, unittest.TestCase): @@ -950,10 +949,9 @@ class AutoEncoderKLTests(ModelTesterMixin, unittest.TestCase): output_slice = output[0, -1, -3:, -3:].flatten() # fmt: off - expected_output_slice = torch.tensor([-0.0814, -0.0229, -0.1320, -0.4123, -0.0366, -0.3473, 0.0438, -0.1662, - 0.1750]) + expected_output_slice = torch.tensor([-0.0814, -0.0229, -0.1320, -0.4123, -0.0366, -0.3473, 0.0438, -0.1662, 0.1750]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) class PipelineTesterMixin(unittest.TestCase): From c1c4dea98d80c8347e818133ec38cba2d89198c4 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 30 Jun 2022 15:54:00 +0000 Subject: [PATCH 19/32] correct tests ncsnpp --- tests/test_modeling_utils.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index 08d70ca58c..0f63d86d9f 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -742,18 +742,18 @@ class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): num_channels = 3 sizes = (32, 32) - noise = 
floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [10]).to(torch_device) + noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) with torch.no_grad(): output = model(noise, time_step) output_slice = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off - expected_output_slice = torch.tensor([3.1909e-07, -8.5393e-08, 4.8460e-07, -4.5550e-07, -1.3205e-06, -6.3475e-07, 9.7837e-07, 2.9974e-07, 1.2345e-06]) + expected_output_slice = torch.tensor([0.1315, 0.0741, 0.0393, 0.0455, 0.0556, 0.0180, -0.0832, -0.0644, -0.0856]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) def test_output_pretrained_ve_large(self): model = NCSNpp.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy") @@ -768,21 +768,21 @@ class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): num_channels = 3 sizes = (32, 32) - noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [10]).to(torch_device) + noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) with torch.no_grad(): output = model(noise, time_step) output_slice = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off - expected_output_slice = torch.tensor([-8.3299e-07, -9.0431e-07, 4.0585e-08, 9.7563e-07, 1.0280e-06, 1.0133e-06, 1.4979e-06, -2.9716e-07, -6.1817e-07]) + expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) def test_output_pretrained_vp(self): - model = NCSNpp.from_pretrained("fusing/ddpm-cifar10-vp-dummy") + model = NCSNpp.from_pretrained("fusing/cifar10-ddpmpp-vp") model.eval() model.to(torch_device) @@ -794,18 +794,18 @@ class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): num_channels = 3 sizes = (32, 32) - noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [10]).to(torch_device) + noise = torch.randn((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [9.]).to(torch_device) with torch.no_grad(): output = model(noise, time_step) output_slice = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off - expected_output_slice = torch.tensor([-3.9086e-07, -1.1001e-05, 1.8881e-06, 1.1106e-05, 1.6629e-06, 2.9820e-06, 8.4978e-06, 8.0253e-07, 1.5435e-06]) + expected_output_slice = torch.tensor([0.3303, -0.2275, -2.8872, -0.1309, -1.2861, 3.4567, -1.0083, 2.5325, -1.3866]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) class VQModelTests(ModelTesterMixin, unittest.TestCase): From 185347e411247ae9c6d8ace910dc3f876958bee1 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 30 Jun 2022 17:01:06 +0000 Subject: [PATCH 20/32] up --- src/diffusers/models/resnet.py | 39 ++++++++++++++++------------------ 1 file changed, 18 insertions(+), 21 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 5875628352..29fc6a8f00 100644 --- a/src/diffusers/models/resnet.py 
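# Pattern behind the updated NCSNpp tests above: a constant input plus a
# fixed floating-point timestep makes the printed slice reproducible, so it
# can be hard-coded as `expected_output_slice`. Sketch, assuming a `model`
# loaded as in the tests:
import torch

noise = torch.ones((1, 3, 32, 32))        # constant, seed-independent input
time_step = torch.tensor([1e-4])          # continuous time value, as in the VE tests
with torch.no_grad():
    output = model(noise, time_step)
print(output[0, -3:, -3:, -1].flatten())  # value to pin as expected_output_slice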
+++ b/src/diffusers/models/resnet.py @@ -207,9 +207,6 @@ class ResBlock(TimestepBlock): self.updown = up or down -# if self.updown: -# import ipdb; ipdb.set_trace() - if up: self.h_upd = Upsample(channels, use_conv=False, dims=dims) self.x_upd = Upsample(channels, use_conv=False, dims=dims) @@ -227,8 +224,10 @@ class ResBlock(TimestepBlock): ), ) self.out_layers = nn.Sequential( - normalization(self.out_channels, swish=0.0 if use_scale_shift_norm else 1.0), - nn.SiLU() if use_scale_shift_norm else nn.Identity(), +# normalization(self.out_channels, swish=0.0 if use_scale_shift_norm else 1.0), +# nn.SiLU() if use_scale_shift_norm else nn.Identity(), + normalization(self.out_channels, swish=0.0), + nn.SiLU(), nn.Dropout(p=dropout), zero_module(conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)), ) @@ -322,6 +321,7 @@ class ResBlock(TimestepBlock): emb_out = self.emb_layers(emb).type(h.dtype) while len(emb_out.shape) < len(h.shape): emb_out = emb_out[..., None] + if self.use_scale_shift_norm: out_norm, out_rest = self.out_layers[0], self.out_layers[1:] scale, shift = torch.chunk(emb_out, 2, dim=1) @@ -338,35 +338,31 @@ class ResBlock(TimestepBlock): return result - def forward_2(self, x, temb, mask=1.0): + def forward_2(self, x, temb): if self.overwrite and not self.is_overwritten: self.set_weights() self.is_overwritten = True h = x - if self.pre_norm: - h = self.norm1(h) - h = self.nonlinearity(h) + h = self.norm1(h) + h = self.nonlinearity(h) h = self.conv1(h) - if not self.pre_norm: - h = self.norm1(h) - h = self.nonlinearity(h) + temb = self.temb_proj(self.nonlinearity(temb))[:, :, None, None] - h = h + self.temb_proj(self.nonlinearity(temb))[:, :, None, None] + scale, shift = torch.chunk(temb, 2, dim=1) - if self.pre_norm: - h = self.norm2(h) - h = self.nonlinearity(h) + h = self.norm2(h) + h = h * scale + shift + + h = self.norm2(h) + + h = self.nonlinearity(h) h = self.dropout(h) h = self.conv2(h) - if not self.pre_norm: - h = self.norm2(h) - h = self.nonlinearity(h) - if self.in_channels != self.out_channels: if self.use_conv_shortcut: x = self.conv_shortcut(x) @@ -376,7 +372,7 @@ class ResBlock(TimestepBlock): return x + h -# unet.py and unet_grad_tts.py +# unet.py, unet_grad_tts.py, unet_ldm.py class ResnetBlock(nn.Module): def __init__( self, @@ -410,6 +406,7 @@ class ResnetBlock(nn.Module): self.norm2 = Normalize(out_channels, num_groups=groups, eps=eps) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) + if non_linearity == "swish": self.nonlinearity = nonlinearity elif non_linearity == "mish": From db934c67508ef8aed715544526fdf78a06dde2f4 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 30 Jun 2022 21:47:40 +0000 Subject: [PATCH 21/32] fix more tests --- src/diffusers/models/resnet.py | 88 ++++++++++++++++++++++++++-------- tests/test_modeling_utils.py | 2 +- 2 files changed, 69 insertions(+), 21 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 29fc6a8f00..83e7cfd979 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -1,4 +1,3 @@ -import string from abc import abstractmethod import numpy as np @@ -188,7 +187,7 @@ class ResBlock(TimestepBlock): use_checkpoint=False, up=False, down=False, - overwrite=False, # TODO(Patrick) - use for glide at later stage + overwrite=True, # TODO(Patrick) - use for glide at later stage ): super().__init__() self.channels = channels @@ -220,12 +219,10 @@ class 
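# The scale/shift path being rewired here is the usual adaptive group-norm
# conditioning: the timestep embedding is projected to 2 * out_channels and
# split into a per-channel scale and shift applied after normalization.
# Standalone sketch (local names, not this module's attributes):
import torch
import torch.nn as nn

norm = nn.GroupNorm(32, 64)
proj = nn.Linear(512, 2 * 64)   # temb_channels -> 2 * out_channels
h, temb = torch.randn(2, 64, 8, 8), torch.randn(2, 512)
scale, shift = proj(nn.SiLU()(temb))[:, :, None, None].chunk(2, dim=1)
h = norm(h) * (1 + scale) + shift   # the form used by the use_scale_shift_norm branch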
ResBlock(TimestepBlock): nn.SiLU(), linear( emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + 2 * self.out_channels, ), ) self.out_layers = nn.Sequential( -# normalization(self.out_channels, swish=0.0 if use_scale_shift_norm else 1.0), -# nn.SiLU() if use_scale_shift_norm else nn.Identity(), normalization(self.out_channels, swish=0.0), nn.SiLU(), nn.Dropout(p=dropout), @@ -257,13 +254,16 @@ class ResBlock(TimestepBlock): self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut + # Add to init + self.time_embedding_norm = "scale_shift" + if self.pre_norm: self.norm1 = Normalize(in_channels, num_groups=groups, eps=eps) else: self.norm1 = Normalize(out_channels, num_groups=groups, eps=eps) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) - self.temb_proj = torch.nn.Linear(temb_channels, out_channels) + self.temb_proj = torch.nn.Linear(temb_channels, 2 * out_channels) self.norm2 = Normalize(out_channels, num_groups=groups, eps=eps) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) @@ -277,6 +277,14 @@ class ResBlock(TimestepBlock): if self.in_channels != self.out_channels: self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + self.up, self.down = up, down +# if self.up: +# self.h_upd = Upsample(in_channels, use_conv=False, dims=dims) +# self.x_upd = Upsample(in_channels, use_conv=False, dims=dims) +# elif self.down: +# self.h_upd = Downsample(in_channels, use_conv=False, dims=dims, padding=1, name="op") +# self.x_upd = Downsample(in_channels, use_conv=False, dims=dims, padding=1, name="op") + def set_weights(self): # TODO(Patrick): use for glide at later stage self.norm1.weight.data = self.in_layers[0].weight.data @@ -309,6 +317,7 @@ class ResBlock(TimestepBlock): # TODO(Patrick): use for glide at later stage self.set_weights() + orig_x = x if self.updown: in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] h = in_rest(x) @@ -334,8 +343,7 @@ class ResBlock(TimestepBlock): result = self.skip_connection(x) + h # TODO(Patrick) Use for glide at later stage - # result = self.forward_2(x, emb) - + result = self.forward_2(orig_x, emb) return result def forward_2(self, x, temb): @@ -347,18 +355,24 @@ class ResBlock(TimestepBlock): h = self.norm1(h) h = self.nonlinearity(h) + if self.up or self.down: + x = self.x_upd(x) + h = self.h_upd(h) + h = self.conv1(h) temb = self.temb_proj(self.nonlinearity(temb))[:, :, None, None] - scale, shift = torch.chunk(temb, 2, dim=1) + if self.time_embedding_norm == "scale_shift": + scale, shift = torch.chunk(temb, 2, dim=1) - h = self.norm2(h) - h = h * scale + shift - - h = self.norm2(h) - - h = self.nonlinearity(h) + h = self.norm2(h) + h = h + h * scale + shift + h = self.nonlinearity(h) + else: + h = h + temb + h = self.norm2(h) + h = self.nonlinearity(h) h = self.dropout(h) h = self.conv2(h) @@ -386,8 +400,12 @@ class ResnetBlock(nn.Module): pre_norm=True, eps=1e-6, non_linearity="swish", + time_embedding_norm="default", + up=False, + down=False, overwrite_for_grad_tts=False, overwrite_for_ldm=False, + overwrite_for_glide=False, ): super().__init__() self.pre_norm = pre_norm @@ -395,6 +413,9 @@ class ResnetBlock(nn.Module): out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut + self.time_embedding_norm = time_embedding_norm + self.up = up + 
self.down = down if self.pre_norm: self.norm1 = Normalize(in_channels, num_groups=groups, eps=eps) @@ -402,7 +423,12 @@ class ResnetBlock(nn.Module): self.norm1 = Normalize(out_channels, num_groups=groups, eps=eps) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) - self.temb_proj = torch.nn.Linear(temb_channels, out_channels) + + if time_embedding_norm == "default": + self.temb_proj = torch.nn.Linear(temb_channels, out_channels) + if time_embedding_norm == "scale_shift": + self.temb_proj = torch.nn.Linear(temb_channels, 2 * out_channels) + self.norm2 = Normalize(out_channels, num_groups=groups, eps=eps) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) @@ -414,6 +440,13 @@ class ResnetBlock(nn.Module): elif non_linearity == "silu": self.nonlinearity = nn.SiLU() + if up: + self.h_upd = Upsample(in_channels, use_conv=False, dims=2) + self.x_upd = Upsample(in_channels, use_conv=False, dims=2) + elif down: + self.h_upd = Downsample(in_channels, use_conv=False, dims=2, padding=1, name="op") + self.x_upd = Downsample(in_channels, use_conv=False, dims=2, padding=1, name="op") + if self.in_channels != self.out_channels: if self.use_conv_shortcut: # TODO(Patrick) - this branch is never used I think => can be deleted! @@ -422,8 +455,9 @@ class ResnetBlock(nn.Module): self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) self.is_overwritten = False + self.overwrite_for_glide = overwrite_for_glide self.overwrite_for_grad_tts = overwrite_for_grad_tts - self.overwrite_for_ldm = overwrite_for_ldm + self.overwrite_for_ldm = overwrite_for_ldm or overwrite_for_glide if self.overwrite_for_grad_tts: dim = in_channels dim_out = out_channels @@ -517,12 +551,18 @@ class ResnetBlock(nn.Module): self.set_weights_ldm() self.is_overwritten = True + if self.up or self.down: + x = self.x_upd(x) + h = x h = h * mask if self.pre_norm: h = self.norm1(h) h = self.nonlinearity(h) + if self.up or self.down: + h = self.h_upd(h) + h = self.conv1(h) if not self.pre_norm: @@ -530,12 +570,20 @@ class ResnetBlock(nn.Module): h = self.nonlinearity(h) h = h * mask - h = h + self.temb_proj(self.nonlinearity(temb))[:, :, None, None] + temb = self.temb_proj(self.nonlinearity(temb))[:, :, None, None] + + if self.time_embedding_norm == "scale_shift": + scale, shift = torch.chunk(temb, 2, dim=1) - h = h * mask - if self.pre_norm: h = self.norm2(h) + h = h + h * scale + shift h = self.nonlinearity(h) + elif self.time_embedding_norm == "default": + h = h + temb + h = h * mask + if self.pre_norm: + h = self.norm2(h) + h = self.nonlinearity(h) h = self.dropout(h) h = self.conv2(h) diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index 0f63d86d9f..ff37e8ab6e 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -259,7 +259,7 @@ class UnetModelTests(ModelTesterMixin, unittest.TestCase): # fmt: off expected_output_slice = torch.tensor([0.2891, -0.1899, 0.2595, -0.6214, 0.0968, -0.2622, 0.4688, 0.1311, 0.0053]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) class GlideSuperResUNetTests(ModelTesterMixin, unittest.TestCase): From fd6f93b2b10654324ae58bfea3bc2d0dbff90427 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 30 Jun 2022 22:09:49 +0000 Subject: [PATCH 22/32] all glide passes --- 
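# In the up/down-sampling resblocks wired up above, the residual branch (h)
# and the skip branch (x) are resampled with the same operator so the final
# `x + h` still lines up. Standalone sketch of the shape contract, using
# F.interpolate as a stand-in for h_upd/x_upd:
import torch
import torch.nn.functional as F

x = torch.randn(1, 64, 16, 16)
h = F.interpolate(x, scale_factor=2, mode="nearest")   # h_upd on the residual branch
x = F.interpolate(x, scale_factor=2, mode="nearest")   # x_upd on the skip branch
assert x.shape == h.shape == (1, 64, 32, 32)           # skip-add stays valid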
src/diffusers/models/resnet.py | 22 ++-- src/diffusers/models/unet_glide.py | 157 ++++++++++++++++++++--------- 2 files changed, 117 insertions(+), 62 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 83e7cfd979..f95d9198b3 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -378,10 +378,7 @@ class ResBlock(TimestepBlock): h = self.conv2(h) if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - x = self.conv_shortcut(x) - else: - x = self.nin_shortcut(x) + x = self.nin_shortcut(x) return x + h @@ -426,7 +423,7 @@ class ResnetBlock(nn.Module): if time_embedding_norm == "default": self.temb_proj = torch.nn.Linear(temb_channels, out_channels) - if time_embedding_norm == "scale_shift": + elif time_embedding_norm == "scale_shift": self.temb_proj = torch.nn.Linear(temb_channels, 2 * out_channels) self.norm2 = Normalize(out_channels, num_groups=groups, eps=eps) @@ -489,7 +486,7 @@ class ResnetBlock(nn.Module): nn.SiLU(), linear( emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + 2 * self.out_channels if self.time_embedding_norm == "scale_shift" else self.out_channels, ), ) self.out_layers = nn.Sequential( @@ -551,9 +548,6 @@ class ResnetBlock(nn.Module): self.set_weights_ldm() self.is_overwritten = True - if self.up or self.down: - x = self.x_upd(x) - h = x h = h * mask if self.pre_norm: @@ -561,6 +555,7 @@ class ResnetBlock(nn.Module): h = self.nonlinearity(h) if self.up or self.down: + x = self.x_upd(x) h = self.h_upd(h) h = self.conv1(h) @@ -571,7 +566,6 @@ class ResnetBlock(nn.Module): h = h * mask temb = self.temb_proj(self.nonlinearity(temb))[:, :, None, None] - if self.time_embedding_norm == "scale_shift": scale, shift = torch.chunk(temb, 2, dim=1) @@ -595,10 +589,10 @@ class ResnetBlock(nn.Module): x = x * mask if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - x = self.conv_shortcut(x) - else: - x = self.nin_shortcut(x) +# if self.use_conv_shortcut: +# x = self.conv_shortcut(x) +# else: + x = self.nin_shortcut(x) return x + h diff --git a/src/diffusers/models/unet_glide.py b/src/diffusers/models/unet_glide.py index 477c1768ae..a0af4b9f48 100644 --- a/src/diffusers/models/unet_glide.py +++ b/src/diffusers/models/unet_glide.py @@ -7,6 +7,7 @@ from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import get_timestep_embedding from .resnet import Downsample, ResBlock, TimestepBlock, Upsample +from .resnet import ResnetBlock def convert_module_to_f16(l): @@ -101,7 +102,7 @@ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): def forward(self, x, emb, encoder_out=None): for layer in self: - if isinstance(layer, TimestepBlock): + if isinstance(layer, TimestepBlock) or isinstance(layer, ResnetBlock): x = layer(x, emb) elif isinstance(layer, AttentionBlock): x = layer(x, encoder_out) @@ -190,14 +191,24 @@ class GlideUNetModel(ModelMixin, ConfigMixin): for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=int(mult * model_channels), - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, +# ResBlock( +# ch, +# time_embed_dim, +# dropout, +# out_channels=int(mult * model_channels), +# dims=dims, +# use_checkpoint=use_checkpoint, +# use_scale_shift_norm=use_scale_shift_norm, +# ) + ResnetBlock( + in_channels=ch, + out_channels=mult * model_channels, + dropout=dropout, + 
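# The isinstance dispatch patched above is what lets the new ResnetBlock sit
# inside TimestepEmbedSequential next to plain layers: timestep-aware blocks
# receive `emb`, attention receives `encoder_out`, and everything else is
# called with `x` alone. Simplified standalone sketch of the rule (stand-in
# class names, not the real module types):
import torch.nn as nn

class TimestepAware(nn.Module):
    def forward(self, x, emb):
        return x + emb.mean()  # placeholder body; only the signature matters

class EmbSequential(nn.Sequential):
    def forward(self, x, emb):
        for layer in self:
            x = layer(x, emb) if isinstance(layer, TimestepAware) else layer(x)
        return x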
temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + time_embedding_norm="scale_shift", + overwrite_for_glide=True, ) ] ch = int(mult * model_channels) @@ -218,15 +229,26 @@ class GlideUNetModel(ModelMixin, ConfigMixin): out_ch = ch self.input_blocks.append( TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, +# ResBlock( +# ch, +# time_embed_dim, +# dropout, +# out_channels=out_ch, +# dims=dims, +# use_checkpoint=use_checkpoint, +# use_scale_shift_norm=use_scale_shift_norm, +# down=True, +# ) + ResnetBlock( + in_channels=ch, out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + time_embedding_norm="scale_shift", + overwrite_for_glide=True, + down=True ) if resblock_updown else Downsample( @@ -240,13 +262,22 @@ class GlideUNetModel(ModelMixin, ConfigMixin): self._feature_size += ch self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, +# ResBlock( +# ch, +# time_embed_dim, +# dropout, +# dims=dims, +# use_checkpoint=use_checkpoint, +# use_scale_shift_norm=use_scale_shift_norm, +# ), + ResnetBlock( + in_channels=ch, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + time_embedding_norm="scale_shift", + overwrite_for_glide=True, ), AttentionBlock( ch, @@ -255,14 +286,23 @@ class GlideUNetModel(ModelMixin, ConfigMixin): num_head_channels=num_head_channels, encoder_channels=transformer_dim, ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), +# ResBlock( +# ch, +# time_embed_dim, +# dropout, +# dims=dims, +# use_checkpoint=use_checkpoint, +# use_scale_shift_norm=use_scale_shift_norm, +# ), + ResnetBlock( + in_channels=ch, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + time_embedding_norm="scale_shift", + overwrite_for_glide=True, + ) ) self._feature_size += ch @@ -271,15 +311,25 @@ class GlideUNetModel(ModelMixin, ConfigMixin): for i in range(num_res_blocks + 1): ich = input_block_chans.pop() layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, - out_channels=int(model_channels * mult), - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) +# ResBlock( +# ch + ich, +# time_embed_dim, +# dropout, +# out_channels=int(model_channels * mult), +# dims=dims, +# use_checkpoint=use_checkpoint, +# use_scale_shift_norm=use_scale_shift_norm, +# ) + ResnetBlock( + in_channels=ch + ich, + out_channels=model_channels * mult, + dropout=dropout, + temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + time_embedding_norm="scale_shift", + overwrite_for_glide=True, + ), ] ch = int(model_channels * mult) if ds in attention_resolutions: @@ -295,14 +345,25 @@ class GlideUNetModel(ModelMixin, ConfigMixin): if level and i == num_res_blocks: out_ch = ch layers.append( - ResBlock( - ch, - time_embed_dim, - dropout, +# ResBlock( +# ch, +# time_embed_dim, +# dropout, +# out_channels=out_ch, +# dims=dims, +# use_checkpoint=use_checkpoint, +# use_scale_shift_norm=use_scale_shift_norm, +# up=True, +# ) + ResnetBlock( + in_channels=ch, out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, + dropout=dropout, + 
temb_channels=time_embed_dim, + eps=1e-5, + non_linearity="silu", + time_embedding_norm="scale_shift", + overwrite_for_glide=True, up=True, ) if resblock_updown From efe1e60e12d07ef8a32db7e43935e6bd9ea74904 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 30 Jun 2022 22:24:22 +0000 Subject: [PATCH 23/32] merge glide into resnets --- src/diffusers/models/resnet.py | 243 +---------------------------- src/diffusers/models/unet_glide.py | 73 ++------- tests/test_modeling_utils.py | 2 +- 3 files changed, 16 insertions(+), 302 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index f95d9198b3..f48a94039e 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -161,229 +161,7 @@ class Downsample(nn.Module): # RESNETS -# unet_glide.py -class ResBlock(TimestepBlock): - """ - A residual block that can optionally change the number of channels. - - :param channels: the number of input channels. :param emb_channels: the number of timestep embedding channels. - :param dropout: the rate of dropout. :param out_channels: if specified, the number of out channels. :param - use_conv: if True and out_channels is specified, use a spatial - convolution instead of a smaller 1x1 convolution to change the channels in the skip connection. - :param dims: determines if the signal is 1D, 2D, or 3D. :param use_checkpoint: if True, use gradient checkpointing - on this module. :param up: if True, use this block for upsampling. :param down: if True, use this block for - downsampling. - """ - - def __init__( - self, - channels, - emb_channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - use_checkpoint=False, - up=False, - down=False, - overwrite=True, # TODO(Patrick) - use for glide at later stage - ): - super().__init__() - self.channels = channels - self.emb_channels = emb_channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_checkpoint = use_checkpoint - self.use_scale_shift_norm = use_scale_shift_norm - - self.in_layers = nn.Sequential( - normalization(channels, swish=1.0), - nn.Identity(), - conv_nd(dims, channels, self.out_channels, 3, padding=1), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, use_conv=False, dims=dims) - self.x_upd = Upsample(channels, use_conv=False, dims=dims) - elif down: - self.h_upd = Downsample(channels, use_conv=False, dims=dims, padding=1, name="op") - self.x_upd = Downsample(channels, use_conv=False, dims=dims, padding=1, name="op") - else: - self.h_upd = self.x_upd = nn.Identity() - - self.emb_layers = nn.Sequential( - nn.SiLU(), - linear( - emb_channels, - 2 * self.out_channels, - ), - ) - self.out_layers = nn.Sequential( - normalization(self.out_channels, swish=0.0), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module(conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1) - else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) - - self.overwrite = overwrite - self.is_overwritten = False - if self.overwrite: - in_channels = channels - out_channels = self.out_channels - conv_shortcut = False - dropout = 0.0 - temb_channels = emb_channels - groups = 32 - pre_norm = True - eps = 1e-5 - non_linearity = "silu" - self.pre_norm = pre_norm - self.in_channels = 
in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - - # Add to init - self.time_embedding_norm = "scale_shift" - - if self.pre_norm: - self.norm1 = Normalize(in_channels, num_groups=groups, eps=eps) - else: - self.norm1 = Normalize(out_channels, num_groups=groups, eps=eps) - - self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) - self.temb_proj = torch.nn.Linear(temb_channels, 2 * out_channels) - self.norm2 = Normalize(out_channels, num_groups=groups, eps=eps) - self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) - if non_linearity == "swish": - self.nonlinearity = nonlinearity - elif non_linearity == "mish": - self.nonlinearity = Mish() - elif non_linearity == "silu": - self.nonlinearity = nn.SiLU() - - if self.in_channels != self.out_channels: - self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) - - self.up, self.down = up, down -# if self.up: -# self.h_upd = Upsample(in_channels, use_conv=False, dims=dims) -# self.x_upd = Upsample(in_channels, use_conv=False, dims=dims) -# elif self.down: -# self.h_upd = Downsample(in_channels, use_conv=False, dims=dims, padding=1, name="op") -# self.x_upd = Downsample(in_channels, use_conv=False, dims=dims, padding=1, name="op") - - def set_weights(self): - # TODO(Patrick): use for glide at later stage - self.norm1.weight.data = self.in_layers[0].weight.data - self.norm1.bias.data = self.in_layers[0].bias.data - - self.conv1.weight.data = self.in_layers[-1].weight.data - self.conv1.bias.data = self.in_layers[-1].bias.data - - self.temb_proj.weight.data = self.emb_layers[-1].weight.data - self.temb_proj.bias.data = self.emb_layers[-1].bias.data - - self.norm2.weight.data = self.out_layers[0].weight.data - self.norm2.bias.data = self.out_layers[0].bias.data - - self.conv2.weight.data = self.out_layers[-1].weight.data - self.conv2.bias.data = self.out_layers[-1].bias.data - - if self.in_channels != self.out_channels: - self.nin_shortcut.weight.data = self.skip_connection.weight.data - self.nin_shortcut.bias.data = self.skip_connection.bias.data - - def forward(self, x, emb): - """ - Apply the block to a Tensor, conditioned on a timestep embedding. - - :param x: an [N x C x ...] Tensor of features. :param emb: an [N x emb_channels] Tensor of timestep embeddings. - :return: an [N x C x ...] Tensor of outputs. 
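# Call contract described by the docstring above, which the unified
# ResnetBlock preserves: features are [N, C, ...], the embedding is
# [N, emb_channels], and the spatial shape is kept. Sketch with a
# hypothetical `block` and assumed sizes:
import torch

x = torch.randn(4, 32, 16, 16)   # [N, C, H, W]
emb = torch.randn(4, 128)        # [N, emb_channels]
out = block(x, emb)              # hypothetical block; out is [4, out_channels, 16, 16]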
- """ - if self.overwrite: - # TODO(Patrick): use for glide at later stage - self.set_weights() - - orig_x = x - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] - - if self.use_scale_shift_norm: - out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = torch.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift - h = out_rest(h) - else: - h = h + emb_out - h = self.out_layers(h) - - result = self.skip_connection(x) + h - - # TODO(Patrick) Use for glide at later stage - result = self.forward_2(orig_x, emb) - return result - - def forward_2(self, x, temb): - if self.overwrite and not self.is_overwritten: - self.set_weights() - self.is_overwritten = True - - h = x - h = self.norm1(h) - h = self.nonlinearity(h) - - if self.up or self.down: - x = self.x_upd(x) - h = self.h_upd(h) - - h = self.conv1(h) - - temb = self.temb_proj(self.nonlinearity(temb))[:, :, None, None] - - if self.time_embedding_norm == "scale_shift": - scale, shift = torch.chunk(temb, 2, dim=1) - - h = self.norm2(h) - h = h + h * scale + shift - h = self.nonlinearity(h) - else: - h = h + temb - h = self.norm2(h) - h = self.nonlinearity(h) - - h = self.dropout(h) - h = self.conv2(h) - - if self.in_channels != self.out_channels: - x = self.nin_shortcut(x) - - return x + h - - -# unet.py, unet_grad_tts.py, unet_ldm.py +# unet.py, unet_grad_tts.py, unet_ldm.py, unet_glide.py class ResnetBlock(nn.Module): def __init__( self, @@ -445,12 +223,9 @@ class ResnetBlock(nn.Module): self.x_upd = Downsample(in_channels, use_conv=False, dims=2, padding=1, name="op") if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - # TODO(Patrick) - this branch is never used I think => can be deleted! 
- self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) - else: - self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + # TODO(SURAJ, PATRICK): ALL OF THE FOLLOWING OF THE INIT METHOD CAN BE DELETED ONCE WEIGHTS ARE CONVERTED self.is_overwritten = False self.overwrite_for_glide = overwrite_for_glide self.overwrite_for_grad_tts = overwrite_for_grad_tts @@ -497,8 +272,6 @@ class ResnetBlock(nn.Module): ) if self.out_channels == in_channels: self.skip_connection = nn.Identity() - # elif use_conv: - # self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1) else: self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) @@ -541,6 +314,8 @@ class ResnetBlock(nn.Module): self.nin_shortcut.bias.data = self.skip_connection.bias.data def forward(self, x, temb, mask=1.0): + # TODO(Patrick) eventually this class should be split into multiple classes + # too many if else statements if self.overwrite_for_grad_tts and not self.is_overwritten: self.set_weights_grad_tts() self.is_overwritten = True @@ -566,6 +341,7 @@ class ResnetBlock(nn.Module): h = h * mask temb = self.temb_proj(self.nonlinearity(temb))[:, :, None, None] + if self.time_embedding_norm == "scale_shift": scale, shift = torch.chunk(temb, 2, dim=1) @@ -589,9 +365,6 @@ class ResnetBlock(nn.Module): x = x * mask if self.in_channels != self.out_channels: -# if self.use_conv_shortcut: -# x = self.conv_shortcut(x) -# else: x = self.nin_shortcut(x) return x + h @@ -605,10 +378,6 @@ class Block(torch.nn.Module): torch.nn.Conv2d(dim, dim_out, 3, padding=1), torch.nn.GroupNorm(groups, dim_out), Mish() ) - def forward(self, x, mask): - output = self.block(x * mask) - return output * mask - # unet_score_estimation.py class ResnetBlockBigGANpp(nn.Module): diff --git a/src/diffusers/models/unet_glide.py b/src/diffusers/models/unet_glide.py index a0af4b9f48..a74507974e 100644 --- a/src/diffusers/models/unet_glide.py +++ b/src/diffusers/models/unet_glide.py @@ -6,8 +6,7 @@ from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import get_timestep_embedding -from .resnet import Downsample, ResBlock, TimestepBlock, Upsample -from .resnet import ResnetBlock +from .resnet import Downsample, ResnetBlock, TimestepBlock, Upsample def convert_module_to_f16(l): @@ -191,15 +190,6 @@ class GlideUNetModel(ModelMixin, ConfigMixin): for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ -# ResBlock( -# ch, -# time_embed_dim, -# dropout, -# out_channels=int(mult * model_channels), -# dims=dims, -# use_checkpoint=use_checkpoint, -# use_scale_shift_norm=use_scale_shift_norm, -# ) ResnetBlock( in_channels=ch, out_channels=mult * model_channels, @@ -207,7 +197,7 @@ class GlideUNetModel(ModelMixin, ConfigMixin): temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", - time_embedding_norm="scale_shift", + time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, ) ] @@ -229,16 +219,6 @@ class GlideUNetModel(ModelMixin, ConfigMixin): out_ch = ch self.input_blocks.append( TimestepEmbedSequential( -# ResBlock( -# ch, -# time_embed_dim, -# dropout, -# out_channels=out_ch, -# dims=dims, -# use_checkpoint=use_checkpoint, -# use_scale_shift_norm=use_scale_shift_norm, -# down=True, -# ) 
ResnetBlock( in_channels=ch, out_channels=out_ch, @@ -246,9 +226,9 @@ class GlideUNetModel(ModelMixin, ConfigMixin): temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", - time_embedding_norm="scale_shift", + time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, - down=True + down=True, ) if resblock_updown else Downsample( @@ -262,21 +242,13 @@ class GlideUNetModel(ModelMixin, ConfigMixin): self._feature_size += ch self.middle_block = TimestepEmbedSequential( -# ResBlock( -# ch, -# time_embed_dim, -# dropout, -# dims=dims, -# use_checkpoint=use_checkpoint, -# use_scale_shift_norm=use_scale_shift_norm, -# ), ResnetBlock( in_channels=ch, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", - time_embedding_norm="scale_shift", + time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, ), AttentionBlock( @@ -286,23 +258,15 @@ class GlideUNetModel(ModelMixin, ConfigMixin): num_head_channels=num_head_channels, encoder_channels=transformer_dim, ), -# ResBlock( -# ch, -# time_embed_dim, -# dropout, -# dims=dims, -# use_checkpoint=use_checkpoint, -# use_scale_shift_norm=use_scale_shift_norm, -# ), ResnetBlock( in_channels=ch, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", - time_embedding_norm="scale_shift", + time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, - ) + ), ) self._feature_size += ch @@ -311,15 +275,6 @@ class GlideUNetModel(ModelMixin, ConfigMixin): for i in range(num_res_blocks + 1): ich = input_block_chans.pop() layers = [ -# ResBlock( -# ch + ich, -# time_embed_dim, -# dropout, -# out_channels=int(model_channels * mult), -# dims=dims, -# use_checkpoint=use_checkpoint, -# use_scale_shift_norm=use_scale_shift_norm, -# ) ResnetBlock( in_channels=ch + ich, out_channels=model_channels * mult, @@ -327,7 +282,7 @@ class GlideUNetModel(ModelMixin, ConfigMixin): temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", - time_embedding_norm="scale_shift", + time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, ), ] @@ -345,16 +300,6 @@ class GlideUNetModel(ModelMixin, ConfigMixin): if level and i == num_res_blocks: out_ch = ch layers.append( -# ResBlock( -# ch, -# time_embed_dim, -# dropout, -# out_channels=out_ch, -# dims=dims, -# use_checkpoint=use_checkpoint, -# use_scale_shift_norm=use_scale_shift_norm, -# up=True, -# ) ResnetBlock( in_channels=ch, out_channels=out_ch, @@ -362,7 +307,7 @@ class GlideUNetModel(ModelMixin, ConfigMixin): temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", - time_embedding_norm="scale_shift", + time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, up=True, ) diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index ff37e8ab6e..1a410b93ff 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -795,7 +795,7 @@ class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): sizes = (32, 32) noise = torch.randn((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [9.]).to(torch_device) + time_step = torch.tensor(batch_size * [9.0]).to(torch_device) with torch.no_grad(): output = model(noise, time_step) From 52e0c5b294a1214deba681fd4115f995395b7548 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 30 Jun 2022 22:28:28 +0000 Subject: [PATCH 24/32] update --- 
test_modeling_utils.py | 1181 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1181 insertions(+) create mode 100755 test_modeling_utils.py diff --git a/test_modeling_utils.py b/test_modeling_utils.py new file mode 100755 index 0000000000..94f88a6a04 --- /dev/null +++ b/test_modeling_utils.py @@ -0,0 +1,1181 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import inspect +import tempfile +import unittest + +import numpy as np +import torch + +from diffusers import ( + AutoencoderKL, + BDDMPipeline, + DDIMPipeline, + DDIMScheduler, + DDPMPipeline, + DDPMScheduler, + GlidePipeline, + GlideSuperResUNetModel, + GlideTextToImageUNetModel, + GradTTSPipeline, + GradTTSScheduler, + LatentDiffusionPipeline, + LatentDiffusionUncondPipeline, + NCSNpp, + PNDMPipeline, + PNDMScheduler, + ScoreSdeVePipeline, + ScoreSdeVeScheduler, + ScoreSdeVpPipeline, + ScoreSdeVpScheduler, + TemporalUNet, + UNetGradTTSModel, + UNetLDMModel, + UNetModel, + VQModel, +) +from diffusers.configuration_utils import ConfigMixin +from diffusers.pipeline_utils import DiffusionPipeline +from diffusers.pipelines.bddm.pipeline_bddm import DiffWave +from diffusers.testing_utils import floats_tensor, slow, torch_device + + +torch.backends.cuda.matmul.allow_tf32 = False + + +class ConfigTester(unittest.TestCase): + def test_load_not_from_mixin(self): + with self.assertRaises(ValueError): + ConfigMixin.from_config("dummy_path") + + def test_save_load(self): + class SampleObject(ConfigMixin): + config_name = "config.json" + + def __init__( + self, + a=2, + b=5, + c=(2, 5), + d="for diffusion", + e=[1, 3], + ): + self.register_to_config(a=a, b=b, c=c, d=d, e=e) + + obj = SampleObject() + config = obj.config + + assert config["a"] == 2 + assert config["b"] == 5 + assert config["c"] == (2, 5) + assert config["d"] == "for diffusion" + assert config["e"] == [1, 3] + + with tempfile.TemporaryDirectory() as tmpdirname: + obj.save_config(tmpdirname) + new_obj = SampleObject.from_config(tmpdirname) + new_config = new_obj.config + + # unfreeze configs + config = dict(config) + new_config = dict(new_config) + + assert config.pop("c") == (2, 5) # instantiated as tuple + assert new_config.pop("c") == [2, 5] # saved & loaded as list because of json + assert config == new_config + + +class ModelTesterMixin: + def test_from_pretrained_save_pretrained(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + new_model = self.model_class.from_pretrained(tmpdirname) + new_model.to(torch_device) + + with torch.no_grad(): + image = model(**inputs_dict) + new_image = new_model(**inputs_dict) + + max_diff = (image - new_image).abs().sum().item() + self.assertLessEqual(max_diff, 5e-5, "Models give different forward passes") + + def test_determinism(self): + init_dict, inputs_dict = 
self.prepare_init_args_and_inputs_for_common()
+        model = self.model_class(**init_dict)
+        model.to(torch_device)
+        model.eval()
+        with torch.no_grad():
+            first = model(**inputs_dict)
+            second = model(**inputs_dict)
+
+        out_1 = first.cpu().numpy()
+        out_2 = second.cpu().numpy()
+        out_1 = out_1[~np.isnan(out_1)]
+        out_2 = out_2[~np.isnan(out_2)]
+        max_diff = np.amax(np.abs(out_1 - out_2))
+        self.assertLessEqual(max_diff, 1e-5)
+
+    def test_output(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+        model = self.model_class(**init_dict)
+        model.to(torch_device)
+        model.eval()
+
+        with torch.no_grad():
+            output = model(**inputs_dict)
+
+        self.assertIsNotNone(output)
+        expected_shape = inputs_dict["x"].shape
+        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
+
+    def test_forward_signature(self):
+        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict)
+        signature = inspect.signature(model.forward)
+        # signature.parameters is an OrderedDict => so arg_names order is deterministic
+        arg_names = [*signature.parameters.keys()]
+
+        expected_arg_names = ["x", "timesteps"]
+        self.assertListEqual(arg_names[:2], expected_arg_names)
+
+    def test_model_from_config(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict)
+        model.to(torch_device)
+        model.eval()
+
+        # test if the model can be loaded from the config
+        # and has all the expected shapes
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            model.save_config(tmpdirname)
+            new_model = self.model_class.from_config(tmpdirname)
+            new_model.to(torch_device)
+            new_model.eval()
+
+        # check if all parameter shapes are the same
+        for param_name in model.state_dict().keys():
+            param_1 = model.state_dict()[param_name]
+            param_2 = new_model.state_dict()[param_name]
+            self.assertEqual(param_1.shape, param_2.shape)
+
+        with torch.no_grad():
+            output_1 = model(**inputs_dict)
+            output_2 = new_model(**inputs_dict)
+
+        self.assertEqual(output_1.shape, output_2.shape)
+
+    def test_training(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict)
+        model.to(torch_device)
+        model.train()
+        output = model(**inputs_dict)
+        noise = torch.randn((inputs_dict["x"].shape[0],) + self.output_shape).to(torch_device)
+        loss = torch.nn.functional.mse_loss(output, noise)
+        loss.backward()
+
+
+class UnetModelTests(ModelTesterMixin, unittest.TestCase):
+    model_class = UNetModel
+
+    @property
+    def dummy_input(self):
+        batch_size = 4
+        num_channels = 3
+        sizes = (32, 32)
+
+        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
+        time_step = torch.tensor([10]).to(torch_device)
+
+        return {"x": noise, "timesteps": time_step}
+
+    @property
+    def input_shape(self):
+        return (3, 32, 32)
+
+    @property
+    def output_shape(self):
+        return (3, 32, 32)
+
+    def prepare_init_args_and_inputs_for_common(self):
+        init_dict = {
+            "ch": 32,
+            "ch_mult": (1, 2),
+            "num_res_blocks": 2,
+            "attn_resolutions": (16,),
+            "resolution": 32,
+        }
+        inputs_dict = self.dummy_input
+        return init_dict, inputs_dict
+
+    def test_from_pretrained_hub(self):
+        model, loading_info = UNetModel.from_pretrained("fusing/ddpm_dummy", output_loading_info=True)
+        self.assertIsNotNone(model)
+        self.assertEqual(len(loading_info["missing_keys"]), 0)
+
+        model.to(torch_device)
+        image = model(**self.dummy_input)
+
+        assert image is not None, "Make sure output is not 
None" + + def test_output_pretrained(self): + model = UNetModel.from_pretrained("fusing/ddpm_dummy") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + noise = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution) + time_step = torch.tensor([10]) + + with torch.no_grad(): + output = model(noise, time_step) + + output_slice = output[0, -1, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([0.2891, -0.1899, 0.2595, -0.6214, 0.0968, -0.2622, 0.4688, 0.1311, 0.0053]) + # fmt: on + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + +class GlideSuperResUNetTests(ModelTesterMixin, unittest.TestCase): + model_class = GlideSuperResUNetModel + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 6 + sizes = (32, 32) + low_res_size = (4, 4) + + noise = torch.randn((batch_size, num_channels // 2) + sizes).to(torch_device) + low_res = torch.randn((batch_size, 3) + low_res_size).to(torch_device) + time_step = torch.tensor([10] * noise.shape[0], device=torch_device) + + return {"x": noise, "timesteps": time_step, "low_res": low_res} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (6, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "attention_resolutions": (2,), + "channel_mult": (1, 2), + "in_channels": 6, + "out_channels": 6, + "model_channels": 32, + "num_head_channels": 8, + "num_heads_upsample": 1, + "num_res_blocks": 2, + "resblock_updown": True, + "resolution": 32, + "use_scale_shift_norm": True, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + output, _ = torch.split(output, 3, dim=1) + + self.assertIsNotNone(output) + expected_shape = inputs_dict["x"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_from_pretrained_hub(self): + model, loading_info = GlideSuperResUNetModel.from_pretrained( + "fusing/glide-super-res-dummy", output_loading_info=True + ) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = GlideSuperResUNetModel.from_pretrained("fusing/glide-super-res-dummy") + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + noise = torch.randn(1, 3, 64, 64) + low_res = torch.randn(1, 3, 4, 4) + time_step = torch.tensor([42] * noise.shape[0]) + + with torch.no_grad(): + output = model(noise, time_step, low_res) + + output, _ = torch.split(output, 3, dim=1) + output_slice = output[0, -1, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-22.8782, -23.2652, -15.3966, -22.8034, -23.3159, -15.5640, -15.3970, -15.4614, - 10.4370]) + # fmt: on + self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + + +class GlideTextToImageUNetModelTests(ModelTesterMixin, unittest.TestCase): + model_class = GlideTextToImageUNetModel + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + transformer_dim = 32 + seq_len 
= 16 + + noise = torch.randn((batch_size, num_channels) + sizes).to(torch_device) + emb = torch.randn((batch_size, seq_len, transformer_dim)).to(torch_device) + time_step = torch.tensor([10] * noise.shape[0], device=torch_device) + + return {"x": noise, "timesteps": time_step, "transformer_out": emb} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (6, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "attention_resolutions": (2,), + "channel_mult": (1, 2), + "in_channels": 3, + "out_channels": 6, + "model_channels": 32, + "num_head_channels": 8, + "num_heads_upsample": 1, + "num_res_blocks": 2, + "resblock_updown": True, + "resolution": 32, + "use_scale_shift_norm": True, + "transformer_dim": 32, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + output, _ = torch.split(output, 3, dim=1) + + self.assertIsNotNone(output) + expected_shape = inputs_dict["x"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_from_pretrained_hub(self): + model, loading_info = GlideTextToImageUNetModel.from_pretrained( + "fusing/unet-glide-text2im-dummy", output_loading_info=True + ) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = GlideTextToImageUNetModel.from_pretrained("fusing/unet-glide-text2im-dummy") + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + noise = torch.randn((1, model.config.in_channels, model.config.resolution, model.config.resolution)).to( + torch_device + ) + emb = torch.randn((1, 16, model.config.transformer_dim)).to(torch_device) + time_step = torch.tensor([10] * noise.shape[0], device=torch_device) + + model.to(torch_device) + with torch.no_grad(): + output = model(noise, time_step, emb) + + output, _ = torch.split(output, 3, dim=1) + output_slice = output[0, -1, -3:, -3:].cpu().flatten() + # fmt: off + expected_output_slice = torch.tensor([2.7766, -10.3558, -14.9149, -0.9376, -14.9175, -17.7679, -5.5565, -12.9521, -12.9845]) + # fmt: on + self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + + +class UNetLDMModelTests(ModelTesterMixin, unittest.TestCase): + model_class = UNetLDMModel + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 4 + sizes = (32, 32) + + noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor([10]).to(torch_device) + + return {"x": noise, "timesteps": time_step} + + @property + def input_shape(self): + return (4, 32, 32) + + @property + def output_shape(self): + return (4, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "image_size": 32, + "in_channels": 4, + "out_channels": 4, + "model_channels": 32, + "num_res_blocks": 2, + "attention_resolutions": (16,), + "channel_mult": (1, 2), + "num_heads": 2, + "conv_resample": True, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = 
UNetLDMModel.from_pretrained("fusing/unet-ldm-dummy", output_loading_info=True) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = UNetLDMModel.from_pretrained("fusing/unet-ldm-dummy") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + noise = torch.randn(1, model.config.in_channels, model.config.image_size, model.config.image_size) + time_step = torch.tensor([10] * noise.shape[0]) + + with torch.no_grad(): + output = model(noise, time_step) + + output_slice = output[0, -1, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + + def test_output_pretrained_spatial_transformer(self): + model = UNetLDMModel.from_pretrained("fusing/unet-ldm-dummy-spatial") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + noise = torch.randn(1, model.config.in_channels, model.config.image_size, model.config.image_size) + context = torch.ones((1, 16, 64), dtype=torch.float32) + time_step = torch.tensor([10] * noise.shape[0]) + + with torch.no_grad(): + output = model(noise, time_step, context=context) + + output_slice = output[0, -1, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([61.3445, 56.9005, 29.4339, 59.5497, 60.7375, 34.1719, 48.1951, 42.6569, 25.0890]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + + +class UNetGradTTSModelTests(ModelTesterMixin, unittest.TestCase): + model_class = UNetGradTTSModel + + @property + def dummy_input(self): + batch_size = 4 + num_features = 32 + seq_len = 16 + + noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) + condition = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) + mask = floats_tensor((batch_size, 1, seq_len)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"x": noise, "timesteps": time_step, "mu": condition, "mask": mask} + + @property + def input_shape(self): + return (4, 32, 16) + + @property + def output_shape(self): + return (4, 32, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "dim": 64, + "groups": 4, + "dim_mults": (1, 2), + "n_feats": 32, + "pe_scale": 1000, + "n_spks": 1, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = UNetGradTTSModel.from_pretrained("fusing/unet-grad-tts-dummy", output_loading_info=True) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = UNetGradTTSModel.from_pretrained("fusing/unet-grad-tts-dummy") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + num_features = model.config.n_feats + seq_len = 16 + noise = torch.randn((1, num_features, seq_len)) + condition = torch.randn((1, num_features, seq_len)) + mask = torch.randn((1, 1, seq_len)) + time_step = torch.tensor([10]) + + 
with torch.no_grad(): + output = model(noise, time_step, condition, mask) + + output_slice = output[0, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-0.0690, -0.0531, 0.0633, -0.0660, -0.0541, 0.0650, -0.0656, -0.0555, 0.0617]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) + + +class TemporalUNetModelTests(ModelTesterMixin, unittest.TestCase): + model_class = TemporalUNet + + @property + def dummy_input(self): + batch_size = 4 + num_features = 14 + seq_len = 16 + + noise = floats_tensor((batch_size, seq_len, num_features)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"x": noise, "timesteps": time_step} + + @property + def input_shape(self): + return (4, 16, 14) + + @property + def output_shape(self): + return (4, 16, 14) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "training_horizon": 128, + "dim": 32, + "dim_mults": [1, 4, 8], + "predict_epsilon": False, + "clip_denoised": True, + "transition_dim": 14, + "cond_dim": 3, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = TemporalUNet.from_pretrained( + "fusing/ddpm-unet-rl-hopper-hor128", output_loading_info=True + ) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + num_features = model.transition_dim + seq_len = 16 + noise = torch.randn((1, seq_len, num_features)) + time_step = torch.full((num_features,), 0) + + with torch.no_grad(): + output = model(noise, time_step) + + output_slice = output[0, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-0.2714, 0.1042, -0.0794, -0.2820, 0.0803, -0.0811, -0.2345, 0.0580, -0.0584]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) + + +class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): + model_class = NCSNpp + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [10]).to(torch_device) + + return {"x": noise, "timesteps": time_step} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "image_size": 32, + "ch_mult": [1, 2, 2, 2], + "nf": 32, + "fir": True, + "progressive": "output_skip", + "progressive_combine": "sum", + "progressive_input": "input_skip", + "scale_by_sigma": True, + "skip_rescale": True, + "embedding_type": "fourier", + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = NCSNpp.from_pretrained("fusing/cifar10-ncsnpp-ve", output_loading_info=True) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained_ve_small(self): + model = 
NCSNpp.from_pretrained("fusing/ncsnpp-cifar10-ve-dummy") + model.eval() + model.to(torch_device) + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) + + with torch.no_grad(): + output = model(noise, time_step) + + output_slice = output[0, -3:, -3:, -1].flatten().cpu() + # fmt: off + expected_output_slice = torch.tensor([0.1315, 0.0741, 0.0393, 0.0455, 0.0556, 0.0180, -0.0832, -0.0644, -0.0856]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + def test_output_pretrained_ve_large(self): + model = NCSNpp.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy") + model.eval() + model.to(torch_device) + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) + + with torch.no_grad(): + output = model(noise, time_step) + + output_slice = output[0, -3:, -3:, -1].flatten().cpu() + # fmt: off + expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + def test_output_pretrained_vp(self): + model = NCSNpp.from_pretrained("fusing/cifar10-ddpmpp-vp") + model.eval() + model.to(torch_device) + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + noise = torch.randn((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [9.0]).to(torch_device) + + with torch.no_grad(): + output = model(noise, time_step) + + output_slice = output[0, -3:, -3:, -1].flatten().cpu() + # fmt: off + expected_output_slice = torch.tensor([0.3303, -0.2275, -2.8872, -0.1309, -1.2861, 3.4567, -1.0083, 2.5325, -1.3866]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + +class VQModelTests(ModelTesterMixin, unittest.TestCase): + model_class = VQModel + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + + return {"x": image} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "ch": 64, + "out_ch": 3, + "num_res_blocks": 1, + "attn_resolutions": [], + "in_channels": 3, + "resolution": 32, + "z_channels": 3, + "n_embed": 256, + "embed_dim": 3, + "sane_index_shape": False, + "ch_mult": (1,), + "dropout": 0.0, + "double_z": False, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_forward_signature(self): + pass + + def test_training(self): + pass + + def test_from_pretrained_hub(self): + model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def 
test_output_pretrained(self): + model = VQModel.from_pretrained("fusing/vqgan-dummy") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + image = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution) + with torch.no_grad(): + output = model(image) + + output_slice = output[0, -1, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-1.1321, 0.1056, 0.3505, -0.6461, -0.2014, 0.0419, -0.5763, -0.8462, -0.4218]) + # fmt: on + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + +class AutoEncoderKLTests(ModelTesterMixin, unittest.TestCase): + model_class = AutoencoderKL + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + + return {"x": image} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "ch": 64, + "ch_mult": (1,), + "embed_dim": 4, + "in_channels": 3, + "num_res_blocks": 1, + "out_ch": 3, + "resolution": 32, + "z_channels": 4, + "attn_resolutions": [], + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_forward_signature(self): + pass + + def test_training(self): + pass + + def test_from_pretrained_hub(self): + model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + image = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution) + with torch.no_grad(): + output = model(image, sample_posterior=True) + + output_slice = output[0, -1, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-0.0814, -0.0229, -0.1320, -0.4123, -0.0366, -0.3473, 0.0438, -0.1662, 0.1750]) + # fmt: on + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + +class PipelineTesterMixin(unittest.TestCase): + def test_from_pretrained_save_pretrained(self): + # 1. 
Load models + model = UNetModel(ch=32, ch_mult=(1, 2), num_res_blocks=2, attn_resolutions=(16,), resolution=32) + schedular = DDPMScheduler(timesteps=10) + + ddpm = DDPMPipeline(model, schedular) + + with tempfile.TemporaryDirectory() as tmpdirname: + ddpm.save_pretrained(tmpdirname) + new_ddpm = DDPMPipeline.from_pretrained(tmpdirname) + + generator = torch.manual_seed(0) + + image = ddpm(generator=generator) + generator = generator.manual_seed(0) + new_image = new_ddpm(generator=generator) + + assert (image - new_image).abs().sum() < 1e-5, "Models don't give the same forward pass" + + @slow + def test_from_pretrained_hub(self): + model_path = "fusing/ddpm-cifar10" + + ddpm = DDPMPipeline.from_pretrained(model_path) + ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path) + + ddpm.noise_scheduler.num_timesteps = 10 + ddpm_from_hub.noise_scheduler.num_timesteps = 10 + + generator = torch.manual_seed(0) + + image = ddpm(generator=generator) + generator = generator.manual_seed(0) + new_image = ddpm_from_hub(generator=generator) + + assert (image - new_image).abs().sum() < 1e-5, "Models don't give the same forward pass" + + @slow + def test_ddpm_cifar10(self): + model_id = "fusing/ddpm-cifar10" + + unet = UNetModel.from_pretrained(model_id) + noise_scheduler = DDPMScheduler.from_config(model_id) + noise_scheduler = noise_scheduler.set_format("pt") + + ddpm = DDPMPipeline(unet=unet, noise_scheduler=noise_scheduler) + + generator = torch.manual_seed(0) + image = ddpm(generator=generator) + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 32, 32) + expected_slice = torch.tensor( + [-0.5712, -0.6215, -0.5953, -0.5438, -0.4775, -0.4539, -0.5172, -0.4872, -0.5105] + ) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_ddim_cifar10(self): + model_id = "fusing/ddpm-cifar10" + + unet = UNetModel.from_pretrained(model_id) + noise_scheduler = DDIMScheduler(tensor_format="pt") + + ddim = DDIMPipeline(unet=unet, noise_scheduler=noise_scheduler) + + generator = torch.manual_seed(0) + image = ddim(generator=generator, eta=0.0) + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 32, 32) + expected_slice = torch.tensor( + [-0.6553, -0.6765, -0.6799, -0.6749, -0.7006, -0.6974, -0.6991, -0.7116, -0.7094] + ) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_pndm_cifar10(self): + model_id = "fusing/ddpm-cifar10" + + unet = UNetModel.from_pretrained(model_id) + noise_scheduler = PNDMScheduler(tensor_format="pt") + + pndm = PNDMPipeline(unet=unet, noise_scheduler=noise_scheduler) + generator = torch.manual_seed(0) + image = pndm(generator=generator) + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 32, 32) + expected_slice = torch.tensor( + [-0.6872, -0.7071, -0.7188, -0.7057, -0.7515, -0.7191, -0.7377, -0.7565, -0.7500] + ) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + @unittest.skip("Skipping for now as it takes too long") + def test_ldm_text2img(self): + model_id = "fusing/latent-diffusion-text2im-large" + ldm = LatentDiffusionPipeline.from_pretrained(model_id) + + prompt = "A painting of a squirrel eating a burger" + generator = torch.manual_seed(0) + image = ldm([prompt], generator=generator, num_inference_steps=20) + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 256, 256) + expected_slice = torch.tensor([0.7295, 0.7358, 0.7256, 0.7435, 0.7095, 0.6884, 0.7325, 0.6921, 
0.6458]) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_ldm_text2img_fast(self): + model_id = "fusing/latent-diffusion-text2im-large" + ldm = LatentDiffusionPipeline.from_pretrained(model_id) + + prompt = "A painting of a squirrel eating a burger" + generator = torch.manual_seed(0) + image = ldm([prompt], generator=generator, num_inference_steps=1) + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 256, 256) + expected_slice = torch.tensor([0.3163, 0.8670, 0.6465, 0.1865, 0.6291, 0.5139, 0.2824, 0.3723, 0.4344]) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_glide_text2img(self): + model_id = "fusing/glide-base" + glide = GlidePipeline.from_pretrained(model_id) + + prompt = "a pencil sketch of a corgi" + generator = torch.manual_seed(0) + image = glide(prompt, generator=generator, num_inference_steps_upscale=20) + + image_slice = image[0, :3, :3, -1].cpu() + + assert image.shape == (1, 256, 256, 3) + expected_slice = torch.tensor([0.7119, 0.7073, 0.6460, 0.7780, 0.7423, 0.6926, 0.7378, 0.7189, 0.7784]) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_grad_tts(self): + model_id = "fusing/grad-tts-libri-tts" + grad_tts = GradTTSPipeline.from_pretrained(model_id) + noise_scheduler = GradTTSScheduler() + grad_tts.noise_scheduler = noise_scheduler + + text = "Hello world, I missed you so much." + generator = torch.manual_seed(0) + + # generate mel spectograms using text + mel_spec = grad_tts(text, generator=generator) + + assert mel_spec.shape == (1, 80, 143) + expected_slice = torch.tensor( + [-6.7584, -6.8347, -6.3293, -6.6437, -6.7233, -6.4684, -6.1187, -6.3172, -6.6890] + ) + assert (mel_spec[0, :3, :3].cpu().flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_score_sde_ve_pipeline(self): + model = NCSNpp.from_pretrained("fusing/ffhq_ncsnpp") + scheduler = ScoreSdeVeScheduler.from_config("fusing/ffhq_ncsnpp") + + sde_ve = ScoreSdeVePipeline(model=model, scheduler=scheduler) + + torch.manual_seed(0) + image = sde_ve(num_inference_steps=2) + + expected_image_sum = 3382849024.0 + expected_image_mean = 1075.3788 + + assert (image.abs().sum() - expected_image_sum).abs().cpu().item() < 1e-2 + assert (image.abs().mean() - expected_image_mean).abs().cpu().item() < 1e-4 + + @slow + def test_score_sde_vp_pipeline(self): + model = NCSNpp.from_pretrained("fusing/cifar10-ddpmpp-vp") + scheduler = ScoreSdeVpScheduler.from_config("fusing/cifar10-ddpmpp-vp") + + sde_vp = ScoreSdeVpPipeline(model=model, scheduler=scheduler) + + torch.manual_seed(0) + image = sde_vp(num_inference_steps=10) + + expected_image_sum = 4183.2012 + expected_image_mean = 1.3617 + + assert (image.abs().sum() - expected_image_sum).abs().cpu().item() < 1e-2 + assert (image.abs().mean() - expected_image_mean).abs().cpu().item() < 1e-4 + + @slow + def test_ldm_uncond(self): + ldm = LatentDiffusionUncondPipeline.from_pretrained("fusing/latent-diffusion-celeba-256") + + generator = torch.manual_seed(0) + image = ldm(generator=generator, num_inference_steps=5) + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 256, 256) + expected_slice = torch.tensor([0.5025, 0.4121, 0.3851, 0.4806, 0.3996, 0.3745, 0.4839, 0.4559, 0.4293]) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + def test_module_from_pipeline(self): + model = DiffWave(num_res_layers=4) + noise_scheduler = DDPMScheduler(timesteps=12) + + bddm = 
BDDMPipeline(model, noise_scheduler) + + # check if the library name for the diffwave moduel is set to pipeline module + self.assertTrue(bddm.config["diffwave"][0] == "bddm") + + # check if we can save and load the pipeline + with tempfile.TemporaryDirectory() as tmpdirname: + bddm.save_pretrained(tmpdirname) + _ = BDDMPipeline.from_pretrained(tmpdirname) + # check if the same works using the DifusionPipeline class + bddm = DiffusionPipeline.from_pretrained(tmpdirname) + + self.assertTrue(bddm.config["diffwave"][0] == "bddm") From db7ec72dd84ffe9c3b290b6bbf6d583a701cd2cf Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 30 Jun 2022 22:29:18 +0000 Subject: [PATCH 25/32] up --- test_modeling_utils.py | 1181 ---------------------------------- tests/test_modeling_utils.py | 4 +- 2 files changed, 2 insertions(+), 1183 deletions(-) delete mode 100755 test_modeling_utils.py diff --git a/test_modeling_utils.py b/test_modeling_utils.py deleted file mode 100755 index 94f88a6a04..0000000000 --- a/test_modeling_utils.py +++ /dev/null @@ -1,1181 +0,0 @@ -# coding=utf-8 -# Copyright 2022 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import inspect -import tempfile -import unittest - -import numpy as np -import torch - -from diffusers import ( - AutoencoderKL, - BDDMPipeline, - DDIMPipeline, - DDIMScheduler, - DDPMPipeline, - DDPMScheduler, - GlidePipeline, - GlideSuperResUNetModel, - GlideTextToImageUNetModel, - GradTTSPipeline, - GradTTSScheduler, - LatentDiffusionPipeline, - LatentDiffusionUncondPipeline, - NCSNpp, - PNDMPipeline, - PNDMScheduler, - ScoreSdeVePipeline, - ScoreSdeVeScheduler, - ScoreSdeVpPipeline, - ScoreSdeVpScheduler, - TemporalUNet, - UNetGradTTSModel, - UNetLDMModel, - UNetModel, - VQModel, -) -from diffusers.configuration_utils import ConfigMixin -from diffusers.pipeline_utils import DiffusionPipeline -from diffusers.pipelines.bddm.pipeline_bddm import DiffWave -from diffusers.testing_utils import floats_tensor, slow, torch_device - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class ConfigTester(unittest.TestCase): - def test_load_not_from_mixin(self): - with self.assertRaises(ValueError): - ConfigMixin.from_config("dummy_path") - - def test_save_load(self): - class SampleObject(ConfigMixin): - config_name = "config.json" - - def __init__( - self, - a=2, - b=5, - c=(2, 5), - d="for diffusion", - e=[1, 3], - ): - self.register_to_config(a=a, b=b, c=c, d=d, e=e) - - obj = SampleObject() - config = obj.config - - assert config["a"] == 2 - assert config["b"] == 5 - assert config["c"] == (2, 5) - assert config["d"] == "for diffusion" - assert config["e"] == [1, 3] - - with tempfile.TemporaryDirectory() as tmpdirname: - obj.save_config(tmpdirname) - new_obj = SampleObject.from_config(tmpdirname) - new_config = new_obj.config - - # unfreeze configs - config = dict(config) - new_config = dict(new_config) - - assert config.pop("c") == (2, 5) # instantiated as tuple - assert new_config.pop("c") == [2, 5] # saved & loaded as list because of json - 
assert config == new_config
-
-
-class ModelTesterMixin:
-    def test_from_pretrained_save_pretrained(self):
-        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
-        model = self.model_class(**init_dict)
-        model.to(torch_device)
-        model.eval()
-
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            model.save_pretrained(tmpdirname)
-            new_model = self.model_class.from_pretrained(tmpdirname)
-            new_model.to(torch_device)
-
-        with torch.no_grad():
-            image = model(**inputs_dict)
-            new_image = new_model(**inputs_dict)
-
-        max_diff = (image - new_image).abs().sum().item()
-        self.assertLessEqual(max_diff, 5e-5, "Models give different forward passes")
-
-    def test_determinism(self):
-        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-        model = self.model_class(**init_dict)
-        model.to(torch_device)
-        model.eval()
-        with torch.no_grad():
-            first = model(**inputs_dict)
-            second = model(**inputs_dict)
-
-        out_1 = first.cpu().numpy()
-        out_2 = second.cpu().numpy()
-        out_1 = out_1[~np.isnan(out_1)]
-        out_2 = out_2[~np.isnan(out_2)]
-        max_diff = np.amax(np.abs(out_1 - out_2))
-        self.assertLessEqual(max_diff, 1e-5)
-
-    def test_output(self):
-        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-        model = self.model_class(**init_dict)
-        model.to(torch_device)
-        model.eval()
-
-        with torch.no_grad():
-            output = model(**inputs_dict)
-
-        self.assertIsNotNone(output)
-        expected_shape = inputs_dict["x"].shape
-        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
-
-    def test_forward_signature(self):
-        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
-
-        model = self.model_class(**init_dict)
-        signature = inspect.signature(model.forward)
-        # signature.parameters is an OrderedDict => so arg_names order is deterministic
-        arg_names = [*signature.parameters.keys()]
-
-        expected_arg_names = ["x", "timesteps"]
-        self.assertListEqual(arg_names[:2], expected_arg_names)
-
-    def test_model_from_config(self):
-        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
-        model = self.model_class(**init_dict)
-        model.to(torch_device)
-        model.eval()
-
-        # test if the model can be loaded from the config
-        # and has all the expected shapes
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            model.save_config(tmpdirname)
-            new_model = self.model_class.from_config(tmpdirname)
-            new_model.to(torch_device)
-            new_model.eval()
-
-        # check if all parameter shapes are the same
-        for param_name in model.state_dict().keys():
-            param_1 = model.state_dict()[param_name]
-            param_2 = new_model.state_dict()[param_name]
-            self.assertEqual(param_1.shape, param_2.shape)
-
-        with torch.no_grad():
-            output_1 = model(**inputs_dict)
-            output_2 = new_model(**inputs_dict)
-
-        self.assertEqual(output_1.shape, output_2.shape)
-
-    def test_training(self):
-        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
-        model = self.model_class(**init_dict)
-        model.to(torch_device)
-        model.train()
-        output = model(**inputs_dict)
-        noise = torch.randn((inputs_dict["x"].shape[0],) + self.output_shape).to(torch_device)
-        loss = torch.nn.functional.mse_loss(output, noise)
-        loss.backward()
-
-
-class UnetModelTests(ModelTesterMixin, unittest.TestCase):
-    model_class = UNetModel
-
-    @property
-    def dummy_input(self):
-        batch_size = 4
-        num_channels = 3
-        sizes = (32, 32)
-
-        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
-        time_step = torch.tensor([10]).to(torch_device)
-
-        return {"x": noise, 
"timesteps": time_step} - - @property - def input_shape(self): - return (3, 32, 32) - - @property - def output_shape(self): - return (3, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "ch": 32, - "ch_mult": (1, 2), - "num_res_blocks": 2, - "attn_resolutions": (16,), - "resolution": 32, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - model, loading_info = UNetModel.from_pretrained("fusing/ddpm_dummy", output_loading_info=True) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = UNetModel.from_pretrained("fusing/ddpm_dummy") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - noise = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution) - time_step = torch.tensor([10]) - - with torch.no_grad(): - output = model(noise, time_step) - - output_slice = output[0, -1, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([0.2891, -0.1899, 0.2595, -0.6214, 0.0968, -0.2622, 0.4688, 0.1311, 0.0053]) - # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - -class GlideSuperResUNetTests(ModelTesterMixin, unittest.TestCase): - model_class = GlideSuperResUNetModel - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 6 - sizes = (32, 32) - low_res_size = (4, 4) - - noise = torch.randn((batch_size, num_channels // 2) + sizes).to(torch_device) - low_res = torch.randn((batch_size, 3) + low_res_size).to(torch_device) - time_step = torch.tensor([10] * noise.shape[0], device=torch_device) - - return {"x": noise, "timesteps": time_step, "low_res": low_res} - - @property - def input_shape(self): - return (3, 32, 32) - - @property - def output_shape(self): - return (6, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "attention_resolutions": (2,), - "channel_mult": (1, 2), - "in_channels": 6, - "out_channels": 6, - "model_channels": 32, - "num_head_channels": 8, - "num_heads_upsample": 1, - "num_res_blocks": 2, - "resblock_updown": True, - "resolution": 32, - "use_scale_shift_norm": True, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_output(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - with torch.no_grad(): - output = model(**inputs_dict) - - output, _ = torch.split(output, 3, dim=1) - - self.assertIsNotNone(output) - expected_shape = inputs_dict["x"].shape - self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def test_from_pretrained_hub(self): - model, loading_info = GlideSuperResUNetModel.from_pretrained( - "fusing/glide-super-res-dummy", output_loading_info=True - ) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = GlideSuperResUNetModel.from_pretrained("fusing/glide-super-res-dummy") - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - noise = torch.randn(1, 3, 64, 64) - 
low_res = torch.randn(1, 3, 4, 4) - time_step = torch.tensor([42] * noise.shape[0]) - - with torch.no_grad(): - output = model(noise, time_step, low_res) - - output, _ = torch.split(output, 3, dim=1) - output_slice = output[0, -1, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-22.8782, -23.2652, -15.3966, -22.8034, -23.3159, -15.5640, -15.3970, -15.4614, - 10.4370]) - # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) - - -class GlideTextToImageUNetModelTests(ModelTesterMixin, unittest.TestCase): - model_class = GlideTextToImageUNetModel - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - transformer_dim = 32 - seq_len = 16 - - noise = torch.randn((batch_size, num_channels) + sizes).to(torch_device) - emb = torch.randn((batch_size, seq_len, transformer_dim)).to(torch_device) - time_step = torch.tensor([10] * noise.shape[0], device=torch_device) - - return {"x": noise, "timesteps": time_step, "transformer_out": emb} - - @property - def input_shape(self): - return (3, 32, 32) - - @property - def output_shape(self): - return (6, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "attention_resolutions": (2,), - "channel_mult": (1, 2), - "in_channels": 3, - "out_channels": 6, - "model_channels": 32, - "num_head_channels": 8, - "num_heads_upsample": 1, - "num_res_blocks": 2, - "resblock_updown": True, - "resolution": 32, - "use_scale_shift_norm": True, - "transformer_dim": 32, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_output(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - with torch.no_grad(): - output = model(**inputs_dict) - - output, _ = torch.split(output, 3, dim=1) - - self.assertIsNotNone(output) - expected_shape = inputs_dict["x"].shape - self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def test_from_pretrained_hub(self): - model, loading_info = GlideTextToImageUNetModel.from_pretrained( - "fusing/unet-glide-text2im-dummy", output_loading_info=True - ) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = GlideTextToImageUNetModel.from_pretrained("fusing/unet-glide-text2im-dummy") - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - noise = torch.randn((1, model.config.in_channels, model.config.resolution, model.config.resolution)).to( - torch_device - ) - emb = torch.randn((1, 16, model.config.transformer_dim)).to(torch_device) - time_step = torch.tensor([10] * noise.shape[0], device=torch_device) - - model.to(torch_device) - with torch.no_grad(): - output = model(noise, time_step, emb) - - output, _ = torch.split(output, 3, dim=1) - output_slice = output[0, -1, -3:, -3:].cpu().flatten() - # fmt: off - expected_output_slice = torch.tensor([2.7766, -10.3558, -14.9149, -0.9376, -14.9175, -17.7679, -5.5565, -12.9521, -12.9845]) - # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) - - -class UNetLDMModelTests(ModelTesterMixin, unittest.TestCase): - model_class = UNetLDMModel - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 4 - sizes = (32, 32) - - 
noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor([10]).to(torch_device) - - return {"x": noise, "timesteps": time_step} - - @property - def input_shape(self): - return (4, 32, 32) - - @property - def output_shape(self): - return (4, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "image_size": 32, - "in_channels": 4, - "out_channels": 4, - "model_channels": 32, - "num_res_blocks": 2, - "attention_resolutions": (16,), - "channel_mult": (1, 2), - "num_heads": 2, - "conv_resample": True, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - model, loading_info = UNetLDMModel.from_pretrained("fusing/unet-ldm-dummy", output_loading_info=True) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = UNetLDMModel.from_pretrained("fusing/unet-ldm-dummy") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - noise = torch.randn(1, model.config.in_channels, model.config.image_size, model.config.image_size) - time_step = torch.tensor([10] * noise.shape[0]) - - with torch.no_grad(): - output = model(noise, time_step) - - output_slice = output[0, -1, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) - - def test_output_pretrained_spatial_transformer(self): - model = UNetLDMModel.from_pretrained("fusing/unet-ldm-dummy-spatial") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - noise = torch.randn(1, model.config.in_channels, model.config.image_size, model.config.image_size) - context = torch.ones((1, 16, 64), dtype=torch.float32) - time_step = torch.tensor([10] * noise.shape[0]) - - with torch.no_grad(): - output = model(noise, time_step, context=context) - - output_slice = output[0, -1, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([61.3445, 56.9005, 29.4339, 59.5497, 60.7375, 34.1719, 48.1951, 42.6569, 25.0890]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) - - -class UNetGradTTSModelTests(ModelTesterMixin, unittest.TestCase): - model_class = UNetGradTTSModel - - @property - def dummy_input(self): - batch_size = 4 - num_features = 32 - seq_len = 16 - - noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) - condition = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) - mask = floats_tensor((batch_size, 1, seq_len)).to(torch_device) - time_step = torch.tensor([10] * batch_size).to(torch_device) - - return {"x": noise, "timesteps": time_step, "mu": condition, "mask": mask} - - @property - def input_shape(self): - return (4, 32, 16) - - @property - def output_shape(self): - return (4, 32, 16) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "dim": 64, - "groups": 4, - "dim_mults": (1, 2), - "n_feats": 32, - "pe_scale": 1000, - "n_spks": 1, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - model, loading_info = 
UNetGradTTSModel.from_pretrained("fusing/unet-grad-tts-dummy", output_loading_info=True) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = UNetGradTTSModel.from_pretrained("fusing/unet-grad-tts-dummy") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - num_features = model.config.n_feats - seq_len = 16 - noise = torch.randn((1, num_features, seq_len)) - condition = torch.randn((1, num_features, seq_len)) - mask = torch.randn((1, 1, seq_len)) - time_step = torch.tensor([10]) - - with torch.no_grad(): - output = model(noise, time_step, condition, mask) - - output_slice = output[0, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-0.0690, -0.0531, 0.0633, -0.0660, -0.0541, 0.0650, -0.0656, -0.0555, 0.0617]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) - - -class TemporalUNetModelTests(ModelTesterMixin, unittest.TestCase): - model_class = TemporalUNet - - @property - def dummy_input(self): - batch_size = 4 - num_features = 14 - seq_len = 16 - - noise = floats_tensor((batch_size, seq_len, num_features)).to(torch_device) - time_step = torch.tensor([10] * batch_size).to(torch_device) - - return {"x": noise, "timesteps": time_step} - - @property - def input_shape(self): - return (4, 16, 14) - - @property - def output_shape(self): - return (4, 16, 14) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "training_horizon": 128, - "dim": 32, - "dim_mults": [1, 4, 8], - "predict_epsilon": False, - "clip_denoised": True, - "transition_dim": 14, - "cond_dim": 3, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - model, loading_info = TemporalUNet.from_pretrained( - "fusing/ddpm-unet-rl-hopper-hor128", output_loading_info=True - ) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - num_features = model.transition_dim - seq_len = 16 - noise = torch.randn((1, seq_len, num_features)) - time_step = torch.full((num_features,), 0) - - with torch.no_grad(): - output = model(noise, time_step) - - output_slice = output[0, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-0.2714, 0.1042, -0.0794, -0.2820, 0.0803, -0.0811, -0.2345, 0.0580, -0.0584]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) - - -class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): - model_class = NCSNpp - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [10]).to(torch_device) - - return {"x": noise, "timesteps": time_step} - - @property - def input_shape(self): - return (3, 32, 32) - - @property - def output_shape(self): - return (3, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - 
"image_size": 32, - "ch_mult": [1, 2, 2, 2], - "nf": 32, - "fir": True, - "progressive": "output_skip", - "progressive_combine": "sum", - "progressive_input": "input_skip", - "scale_by_sigma": True, - "skip_rescale": True, - "embedding_type": "fourier", - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - model, loading_info = NCSNpp.from_pretrained("fusing/cifar10-ncsnpp-ve", output_loading_info=True) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained_ve_small(self): - model = NCSNpp.from_pretrained("fusing/ncsnpp-cifar10-ve-dummy") - model.eval() - model.to(torch_device) - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) - - with torch.no_grad(): - output = model(noise, time_step) - - output_slice = output[0, -3:, -3:, -1].flatten().cpu() - # fmt: off - expected_output_slice = torch.tensor([0.1315, 0.0741, 0.0393, 0.0455, 0.0556, 0.0180, -0.0832, -0.0644, -0.0856]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - def test_output_pretrained_ve_large(self): - model = NCSNpp.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy") - model.eval() - model.to(torch_device) - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) - - with torch.no_grad(): - output = model(noise, time_step) - - output_slice = output[0, -3:, -3:, -1].flatten().cpu() - # fmt: off - expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - def test_output_pretrained_vp(self): - model = NCSNpp.from_pretrained("fusing/cifar10-ddpmpp-vp") - model.eval() - model.to(torch_device) - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - noise = torch.randn((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [9.0]).to(torch_device) - - with torch.no_grad(): - output = model(noise, time_step) - - output_slice = output[0, -3:, -3:, -1].flatten().cpu() - # fmt: off - expected_output_slice = torch.tensor([0.3303, -0.2275, -2.8872, -0.1309, -1.2861, 3.4567, -1.0083, 2.5325, -1.3866]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - -class VQModelTests(ModelTesterMixin, unittest.TestCase): - model_class = VQModel - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - - return {"x": image} - - @property - def input_shape(self): - return (3, 32, 32) - - @property - def output_shape(self): - return (3, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "ch": 64, - "out_ch": 3, - "num_res_blocks": 1, - 
"attn_resolutions": [], - "in_channels": 3, - "resolution": 32, - "z_channels": 3, - "n_embed": 256, - "embed_dim": 3, - "sane_index_shape": False, - "ch_mult": (1,), - "dropout": 0.0, - "double_z": False, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_forward_signature(self): - pass - - def test_training(self): - pass - - def test_from_pretrained_hub(self): - model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = VQModel.from_pretrained("fusing/vqgan-dummy") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - image = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution) - with torch.no_grad(): - output = model(image) - - output_slice = output[0, -1, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-1.1321, 0.1056, 0.3505, -0.6461, -0.2014, 0.0419, -0.5763, -0.8462, -0.4218]) - # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - -class AutoEncoderKLTests(ModelTesterMixin, unittest.TestCase): - model_class = AutoencoderKL - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - - return {"x": image} - - @property - def input_shape(self): - return (3, 32, 32) - - @property - def output_shape(self): - return (3, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "ch": 64, - "ch_mult": (1,), - "embed_dim": 4, - "in_channels": 3, - "num_res_blocks": 1, - "out_ch": 3, - "resolution": 32, - "z_channels": 4, - "attn_resolutions": [], - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_forward_signature(self): - pass - - def test_training(self): - pass - - def test_from_pretrained_hub(self): - model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - image = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution) - with torch.no_grad(): - output = model(image, sample_posterior=True) - - output_slice = output[0, -1, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-0.0814, -0.0229, -0.1320, -0.4123, -0.0366, -0.3473, 0.0438, -0.1662, 0.1750]) - # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - -class PipelineTesterMixin(unittest.TestCase): - def test_from_pretrained_save_pretrained(self): - # 1. 
Load models
-        model = UNetModel(ch=32, ch_mult=(1, 2), num_res_blocks=2, attn_resolutions=(16,), resolution=32)
-        scheduler = DDPMScheduler(timesteps=10)
-
-        ddpm = DDPMPipeline(model, scheduler)
-
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            ddpm.save_pretrained(tmpdirname)
-            new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
-
-        generator = torch.manual_seed(0)
-
-        image = ddpm(generator=generator)
-        generator = generator.manual_seed(0)
-        new_image = new_ddpm(generator=generator)
-
-        assert (image - new_image).abs().sum() < 1e-5, "Models don't give the same forward pass"
-
-    @slow
-    def test_from_pretrained_hub(self):
-        model_path = "fusing/ddpm-cifar10"
-
-        ddpm = DDPMPipeline.from_pretrained(model_path)
-        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path)
-
-        ddpm.noise_scheduler.num_timesteps = 10
-        ddpm_from_hub.noise_scheduler.num_timesteps = 10
-
-        generator = torch.manual_seed(0)
-
-        image = ddpm(generator=generator)
-        generator = generator.manual_seed(0)
-        new_image = ddpm_from_hub(generator=generator)
-
-        assert (image - new_image).abs().sum() < 1e-5, "Models don't give the same forward pass"
-
-    @slow
-    def test_ddpm_cifar10(self):
-        model_id = "fusing/ddpm-cifar10"
-
-        unet = UNetModel.from_pretrained(model_id)
-        noise_scheduler = DDPMScheduler.from_config(model_id)
-        noise_scheduler = noise_scheduler.set_format("pt")
-
-        ddpm = DDPMPipeline(unet=unet, noise_scheduler=noise_scheduler)
-
-        generator = torch.manual_seed(0)
-        image = ddpm(generator=generator)
-
-        image_slice = image[0, -1, -3:, -3:].cpu()
-
-        assert image.shape == (1, 3, 32, 32)
-        expected_slice = torch.tensor(
-            [-0.5712, -0.6215, -0.5953, -0.5438, -0.4775, -0.4539, -0.5172, -0.4872, -0.5105]
-        )
-        assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2
-
-    @slow
-    def test_ddim_cifar10(self):
-        model_id = "fusing/ddpm-cifar10"
-
-        unet = UNetModel.from_pretrained(model_id)
-        noise_scheduler = DDIMScheduler(tensor_format="pt")
-
-        ddim = DDIMPipeline(unet=unet, noise_scheduler=noise_scheduler)
-
-        generator = torch.manual_seed(0)
-        image = ddim(generator=generator, eta=0.0)
-
-        image_slice = image[0, -1, -3:, -3:].cpu()
-
-        assert image.shape == (1, 3, 32, 32)
-        expected_slice = torch.tensor(
-            [-0.6553, -0.6765, -0.6799, -0.6749, -0.7006, -0.6974, -0.6991, -0.7116, -0.7094]
-        )
-        assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2
-
-    @slow
-    def test_pndm_cifar10(self):
-        model_id = "fusing/ddpm-cifar10"
-
-        unet = UNetModel.from_pretrained(model_id)
-        noise_scheduler = PNDMScheduler(tensor_format="pt")
-
-        pndm = PNDMPipeline(unet=unet, noise_scheduler=noise_scheduler)
-        generator = torch.manual_seed(0)
-        image = pndm(generator=generator)
-
-        image_slice = image[0, -1, -3:, -3:].cpu()
-
-        assert image.shape == (1, 3, 32, 32)
-        expected_slice = torch.tensor(
-            [-0.6872, -0.7071, -0.7188, -0.7057, -0.7515, -0.7191, -0.7377, -0.7565, -0.7500]
-        )
-        assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2
-
-    @slow
-    @unittest.skip("Skipping for now as it takes too long")
-    def test_ldm_text2img(self):
-        model_id = "fusing/latent-diffusion-text2im-large"
-        ldm = LatentDiffusionPipeline.from_pretrained(model_id)
-
-        prompt = "A painting of a squirrel eating a burger"
-        generator = torch.manual_seed(0)
-        image = ldm([prompt], generator=generator, num_inference_steps=20)
-
-        image_slice = image[0, -1, -3:, -3:].cpu()
-
-        assert image.shape == (1, 3, 256, 256)
-        expected_slice = torch.tensor([0.7295, 0.7358, 0.7256, 0.7435, 0.7095, 0.6884, 0.7325, 0.6921,
0.6458])
-        assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2
-
-    @slow
-    def test_ldm_text2img_fast(self):
-        model_id = "fusing/latent-diffusion-text2im-large"
-        ldm = LatentDiffusionPipeline.from_pretrained(model_id)
-
-        prompt = "A painting of a squirrel eating a burger"
-        generator = torch.manual_seed(0)
-        image = ldm([prompt], generator=generator, num_inference_steps=1)
-
-        image_slice = image[0, -1, -3:, -3:].cpu()
-
-        assert image.shape == (1, 3, 256, 256)
-        expected_slice = torch.tensor([0.3163, 0.8670, 0.6465, 0.1865, 0.6291, 0.5139, 0.2824, 0.3723, 0.4344])
-        assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2
-
-    @slow
-    def test_glide_text2img(self):
-        model_id = "fusing/glide-base"
-        glide = GlidePipeline.from_pretrained(model_id)
-
-        prompt = "a pencil sketch of a corgi"
-        generator = torch.manual_seed(0)
-        image = glide(prompt, generator=generator, num_inference_steps_upscale=20)
-
-        image_slice = image[0, :3, :3, -1].cpu()
-
-        assert image.shape == (1, 256, 256, 3)
-        expected_slice = torch.tensor([0.7119, 0.7073, 0.6460, 0.7780, 0.7423, 0.6926, 0.7378, 0.7189, 0.7784])
-        assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2
-
-    @slow
-    def test_grad_tts(self):
-        model_id = "fusing/grad-tts-libri-tts"
-        grad_tts = GradTTSPipeline.from_pretrained(model_id)
-        noise_scheduler = GradTTSScheduler()
-        grad_tts.noise_scheduler = noise_scheduler
-
-        text = "Hello world, I missed you so much."
-        generator = torch.manual_seed(0)
-
-        # generate mel spectrograms using text
-        mel_spec = grad_tts(text, generator=generator)
-
-        assert mel_spec.shape == (1, 80, 143)
-        expected_slice = torch.tensor(
-            [-6.7584, -6.8347, -6.3293, -6.6437, -6.7233, -6.4684, -6.1187, -6.3172, -6.6890]
-        )
-        assert (mel_spec[0, :3, :3].cpu().flatten() - expected_slice).abs().max() < 1e-2
-
-    @slow
-    def test_score_sde_ve_pipeline(self):
-        model = NCSNpp.from_pretrained("fusing/ffhq_ncsnpp")
-        scheduler = ScoreSdeVeScheduler.from_config("fusing/ffhq_ncsnpp")
-
-        sde_ve = ScoreSdeVePipeline(model=model, scheduler=scheduler)
-
-        torch.manual_seed(0)
-        image = sde_ve(num_inference_steps=2)
-
-        expected_image_sum = 3382849024.0
-        expected_image_mean = 1075.3788
-
-        assert (image.abs().sum() - expected_image_sum).abs().cpu().item() < 1e-2
-        assert (image.abs().mean() - expected_image_mean).abs().cpu().item() < 1e-4
-
-    @slow
-    def test_score_sde_vp_pipeline(self):
-        model = NCSNpp.from_pretrained("fusing/cifar10-ddpmpp-vp")
-        scheduler = ScoreSdeVpScheduler.from_config("fusing/cifar10-ddpmpp-vp")
-
-        sde_vp = ScoreSdeVpPipeline(model=model, scheduler=scheduler)
-
-        torch.manual_seed(0)
-        image = sde_vp(num_inference_steps=10)
-
-        expected_image_sum = 4183.2012
-        expected_image_mean = 1.3617
-
-        assert (image.abs().sum() - expected_image_sum).abs().cpu().item() < 1e-2
-        assert (image.abs().mean() - expected_image_mean).abs().cpu().item() < 1e-4
-
-    @slow
-    def test_ldm_uncond(self):
-        ldm = LatentDiffusionUncondPipeline.from_pretrained("fusing/latent-diffusion-celeba-256")
-
-        generator = torch.manual_seed(0)
-        image = ldm(generator=generator, num_inference_steps=5)
-
-        image_slice = image[0, -1, -3:, -3:].cpu()
-
-        assert image.shape == (1, 3, 256, 256)
-        expected_slice = torch.tensor([0.5025, 0.4121, 0.3851, 0.4806, 0.3996, 0.3745, 0.4839, 0.4559, 0.4293])
-        assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2
-
-    def test_module_from_pipeline(self):
-        model = DiffWave(num_res_layers=4)
-        noise_scheduler = DDPMScheduler(timesteps=12)
-
-        bddm =
BDDMPipeline(model, noise_scheduler)
-
-        # check if the library name for the diffwave module is set to pipeline module
-        self.assertTrue(bddm.config["diffwave"][0] == "bddm")
-
-        # check if we can save and load the pipeline
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            bddm.save_pretrained(tmpdirname)
-            _ = BDDMPipeline.from_pretrained(tmpdirname)
-            # check if the same works using the DiffusionPipeline class
-            bddm = DiffusionPipeline.from_pretrained(tmpdirname)
-
-        self.assertTrue(bddm.config["diffwave"][0] == "bddm")
diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py
index 1a410b93ff..94f88a6a04 100755
--- a/tests/test_modeling_utils.py
+++ b/tests/test_modeling_utils.py
@@ -880,7 +880,7 @@ class VQModelTests(ModelTesterMixin, unittest.TestCase):
         # fmt: off
         expected_output_slice = torch.tensor([-1.1321, 0.1056, 0.3505, -0.6461, -0.2014, 0.0419, -0.5763, -0.8462, -0.4218])
         # fmt: on
-        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3))
+        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2))


 class AutoEncoderKLTests(ModelTesterMixin, unittest.TestCase):
@@ -951,7 +951,7 @@ class AutoEncoderKLTests(ModelTesterMixin, unittest.TestCase):
         # fmt: off
         expected_output_slice = torch.tensor([-0.0814, -0.0229, -0.1320, -0.4123, -0.0366, -0.3473, 0.0438, -0.1662, 0.1750])
         # fmt: on
-        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3))
+        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2))


 class PipelineTesterMixin(unittest.TestCase):
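[PATCH 26/32] below strips the dead code paths and keeps CrossAttention as the single attention implementation in unet_ldm.py. The two reshape helpers that survive in CrossAttention fold the head axis into the batch axis before the attention einsum and unfold it afterwards. A standalone sanity sketch (the example shapes are arbitrary assumptions, not values taken from the patch) showing that the two transforms are exact inverses:

import torch

batch_size, seq_len, heads, dim_head = 2, 5, 8, 64
x = torch.randn(batch_size, seq_len, heads * dim_head)

# reshape_heads_to_batch_dim: (b, t, h*d) -> (b*h, t, d)
y = x.reshape(batch_size, seq_len, heads, dim_head)
y = y.permute(0, 2, 1, 3).reshape(batch_size * heads, seq_len, dim_head)

# reshape_batch_dim_to_heads: (b*h, t, d) -> (b, t, h*d)
z = y.reshape(batch_size, heads, seq_len, dim_head)
z = z.permute(0, 2, 1, 3).reshape(batch_size, seq_len, heads * dim_head)

assert torch.equal(x, z)  # the round-trip recovers the original tensor

Folding heads into the batch dimension lets one batched einsum attend over every head independently, which is also why CrossAttention.forward repeats the attention mask h times along the batch axis before the masked fill.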
From 61ea57c5a712b862a88c387892b5a25dfc504b4a Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 30 Jun 2022 22:42:06 +0000
Subject: [PATCH 26/32] clean up lots of dead code

---
 src/diffusers/models/unet.py       |  42 ---
 src/diffusers/models/unet_glide.py |  13 -
 src/diffusers/models/unet_ldm.py   | 586 ++++++-----------------------
 3 files changed, 105 insertions(+), 536 deletions(-)

diff --git a/src/diffusers/models/unet.py b/src/diffusers/models/unet.py
index 5bc13f80f9..d3ee42dc58 100644
--- a/src/diffusers/models/unet.py
+++ b/src/diffusers/models/unet.py
@@ -34,48 +34,6 @@ def Normalize(in_channels):
     return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)


-# class ResnetBlock(nn.Module):
-#     def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, dropout, temb_channels=512):
-#         super().__init__()
-#         self.in_channels = in_channels
-#         out_channels = in_channels if out_channels is None else out_channels
-#         self.out_channels = out_channels
-#         self.use_conv_shortcut = conv_shortcut
-#
-#         self.norm1 = Normalize(in_channels)
-#         self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
-#         self.temb_proj = torch.nn.Linear(temb_channels, out_channels)
-#         self.norm2 = Normalize(out_channels)
-#         self.dropout = torch.nn.Dropout(dropout)
-#         self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
-#         if self.in_channels != self.out_channels:
-#             if self.use_conv_shortcut:
-#                 self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
-#             else:
-#                 self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
-#
-#     def forward(self, x, temb):
-#         h = x
-#         h = self.norm1(h)
-#         h = nonlinearity(h)
-#         h = self.conv1(h)
-#
-#         h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
-#
-#         h = self.norm2(h)
-#         h = nonlinearity(h)
-#         h = self.dropout(h)
-#         h = self.conv2(h)
-#
-#         if self.in_channels != self.out_channels:
-#             if self.use_conv_shortcut:
-#                 x = self.conv_shortcut(x)
-#             else:
-#                 x = self.nin_shortcut(x)
-#
-#         return x + h
-
-
 class UNetModel(ModelMixin, ConfigMixin):
     def __init__(
         self,
diff --git a/src/diffusers/models/unet_glide.py b/src/diffusers/models/unet_glide.py
index a74507974e..33efe0b4b1 100644
--- a/src/diffusers/models/unet_glide.py
+++ b/src/diffusers/models/unet_glide.py
@@ -29,19 +29,6 @@ def convert_module_to_f32(l):
         l.bias.data = l.bias.data.float()


-def avg_pool_nd(dims, *args, **kwargs):
-    """
-    Create a 1D, 2D, or 3D average pooling module.
-    """
-    if dims == 1:
-        return nn.AvgPool1d(*args, **kwargs)
-    elif dims == 2:
-        return nn.AvgPool2d(*args, **kwargs)
-    elif dims == 3:
-        return nn.AvgPool3d(*args, **kwargs)
-    raise ValueError(f"unsupported dimensions: {dims}")
-
-
 def conv_nd(dims, *args, **kwargs):
     """
     Create a 1D, 2D, or 3D convolution module.
diff --git a/src/diffusers/models/unet_ldm.py b/src/diffusers/models/unet_ldm.py
index 9c01f0d17e..1806485481 100644
--- a/src/diffusers/models/unet_ldm.py
+++ b/src/diffusers/models/unet_ldm.py
@@ -78,182 +78,6 @@ def Normalize(in_channels):
     return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)


-# class LinearAttention(nn.Module):
-#     def __init__(self, dim, heads=4, dim_head=32):
-#         super().__init__()
-#         self.heads = heads
-#         hidden_dim = dim_head * heads
-#         self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
-#         self.to_out = nn.Conv2d(hidden_dim, dim, 1)
-#
-#     def forward(self, x):
-#         b, c, h, w = x.shape
-#         qkv = self.to_qkv(x)
-#         q, k, v = rearrange(qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3)
-#         import ipdb; ipdb.set_trace()
-#         k = k.softmax(dim=-1)
-#         context = torch.einsum("bhdn,bhen->bhde", k, v)
-#         out = torch.einsum("bhde,bhdn->bhen", context, q)
-#         out = rearrange(out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w)
-#         return self.to_out(out)
-#

-# class SpatialSelfAttention(nn.Module):
-#     def __init__(self, in_channels):
-#         super().__init__()
-#         self.in_channels = in_channels
-#
-#         self.norm = Normalize(in_channels)
-#         self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-#         self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-#         self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-#         self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-#
-#     def forward(self, x):
-#         h_ = x
-#         h_ = self.norm(h_)
-#         q = self.q(h_)
-#         k = self.k(h_)
-#         v = self.v(h_)
-#
-#         compute attention
-#         b, c, h, w = q.shape
-#         q = rearrange(q, "b c h w -> b (h w) c")
-#         k = rearrange(k, "b c h w -> b c (h w)")
-#         w_ = torch.einsum("bij,bjk->bik", q, k)
-#
-#         w_ = w_ * (int(c) ** (-0.5))
-#         w_ = torch.nn.functional.softmax(w_, dim=2)
-#
-#         attend to values
-#         v = rearrange(v, "b c h w -> b c (h w)")
-#         w_ = rearrange(w_, "b i j -> b j i")
-#         h_ = torch.einsum("bij,bjk->bik", v, w_)
-#         h_ = rearrange(h_, "b c (h w) -> b c h w", h=h)
-#         h_ = self.proj_out(h_)
-#
-#         return x + h_
-#


-class CrossAttention(nn.Module):
-    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
-        super().__init__()
-        inner_dim = dim_head * heads
-        context_dim = default(context_dim, query_dim)
-
-        self.scale = dim_head**-0.5
-        self.heads = heads
-
-        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
-        self.to_k =
nn.Linear(context_dim, inner_dim, bias=False) - self.to_v = nn.Linear(context_dim, inner_dim, bias=False) - - self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)) - - def reshape_heads_to_batch_dim(self, tensor): - batch_size, seq_len, dim = tensor.shape - head_size = self.heads - tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size) - tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size) - return tensor - - def reshape_batch_dim_to_heads(self, tensor): - batch_size, seq_len, dim = tensor.shape - head_size = self.heads - tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) - tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) - return tensor - - def forward(self, x, context=None, mask=None): - batch_size, sequence_length, dim = x.shape - - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - - q = self.reshape_heads_to_batch_dim(q) - k = self.reshape_heads_to_batch_dim(k) - v = self.reshape_heads_to_batch_dim(v) - - sim = torch.einsum("b i d, b j d -> b i j", q, k) * self.scale - - if exists(mask): - mask = mask.reshape(batch_size, -1) - max_neg_value = -torch.finfo(sim.dtype).max - mask = mask[:, None, :].repeat(h, 1, 1) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - - out = torch.einsum("b i j, b j d -> b i d", attn, v) - out = self.reshape_batch_dim_to_heads(out) - return self.to_out(out) - - -class BasicTransformerBlock(nn.Module): - def __init__(self, dim, n_heads, d_head, dropout=0.0, context_dim=None, gated_ff=True, checkpoint=True): - super().__init__() - self.attn1 = CrossAttention( - query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout - ) # is a self-attention - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = CrossAttention( - query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout - ) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.norm3 = nn.LayerNorm(dim) - self.checkpoint = checkpoint - - def forward(self, x, context=None): - x = self.attn1(self.norm1(x)) + x - x = self.attn2(self.norm2(x), context=context) + x - x = self.ff(self.norm3(x)) + x - return x - - -class SpatialTransformer(nn.Module): - """ - Transformer block for image-like data. First, project the input (aka embedding) and reshape to b, t, d. Then apply - standard transformer action. 
Finally, reshape to image - """ - - def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0.0, context_dim=None): - super().__init__() - self.in_channels = in_channels - inner_dim = n_heads * d_head - self.norm = Normalize(in_channels) - - self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) - - self.transformer_blocks = nn.ModuleList( - [ - BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) - for d in range(depth) - ] - ) - - self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)) - - def forward(self, x, context=None): - # note: if no context is given, cross-attention defaults to self-attention - b, c, h, w = x.shape - x_in = x - x = self.norm(x) - x = self.proj_in(x) - x = x.permute(0, 2, 3, 1).reshape(b, h * w, c) - for block in self.transformer_blocks: - x = block(x, context=context) - x = x.reshape(b, h, w, c).permute(0, 3, 1, 2) - x = self.proj_out(x) - return x + x_in - - def convert_module_to_f16(l): """ Convert primitive modules to float16. @@ -274,19 +98,6 @@ def convert_module_to_f32(l): l.bias.data = l.bias.data.float() -def avg_pool_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D average pooling module. - """ - if dims == 1: - return nn.AvgPool1d(*args, **kwargs) - elif dims == 2: - return nn.AvgPool2d(*args, **kwargs) - elif dims == 3: - return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. @@ -330,36 +141,6 @@ def normalization(channels, swish=0.0): return GroupNorm32(num_channels=channels, num_groups=32, swish=swish) -class AttentionPool2d(nn.Module): - """ - Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py - """ - - def __init__( - self, - spacial_dim: int, - embed_dim: int, - num_heads_channels: int, - output_dim: int = None, - ): - super().__init__() - self.positional_embedding = nn.Parameter(torch.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5) - self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) - self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) - self.num_heads = embed_dim // num_heads_channels - self.attention = QKVAttention(self.num_heads) - - def forward(self, x): - b, c, *_spatial = x.shape - x = x.reshape(b, c, -1) # NC(HW) - x = torch.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) - x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) - x = self.qkv_proj(x) - x = self.attention(x) - x = self.c_proj(x) - return x[:, :, 0] - - class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. @@ -376,39 +157,6 @@ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): return x -class QKVAttention(nn.Module): - """ - A module which performs QKV attention and splits in a different order. - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x - T] tensor after attention. 
- """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.chunk(3, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = torch.einsum( - "bct,bcs->bts", - (q * scale).view(bs * self.n_heads, ch, length), - (k * scale).view(bs * self.n_heads, ch, length), - ) # More stable with f16 than dividing afterwards - weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) - a = torch.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: @@ -602,21 +350,7 @@ class UNetLDMModel(ModelMixin, ConfigMixin): out_ch = ch self.input_blocks.append( TimestepEmbedSequential( - # ResBlock( - # ch, - # time_embed_dim, - # dropout, - # out_channels=out_ch, - # dims=dims, - # use_checkpoint=use_checkpoint, - # use_scale_shift_norm=use_scale_shift_norm, - # down=True, - # ) - None - if resblock_updown - else Downsample( - ch, use_conv=conv_resample, dims=dims, out_channels=out_ch, padding=1, name="op" - ) + Downsample(ch, use_conv=conv_resample, dims=dims, out_channels=out_ch, padding=1, name="op") ) ) ch = out_ch @@ -703,21 +437,7 @@ class UNetLDMModel(ModelMixin, ConfigMixin): ) if level and i == num_res_blocks: out_ch = ch - layers.append( - # ResBlock( - # ch, - # time_embed_dim, - # dropout, - # out_channels=out_ch, - # dims=dims, - # use_checkpoint=use_checkpoint, - # use_scale_shift_norm=use_scale_shift_norm, - # up=True, - # ) - None - if resblock_updown - else Upsample(ch, use_conv=conv_resample, dims=dims, out_channels=out_ch) - ) + layers.append(Upsample(ch, use_conv=conv_resample, dims=dims, out_channels=out_ch)) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch @@ -784,215 +504,119 @@ class UNetLDMModel(ModelMixin, ConfigMixin): return self.out(h) -class EncoderUNetModel(nn.Module): +class SpatialTransformer(nn.Module): """ - The half UNet model with attention and timestep embedding. For usage, see UNet. + Transformer block for image-like data. First, project the input (aka embedding) and reshape to b, t, d. Then apply + standard transformer action. 
Finally, reshape to image """ - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - use_checkpoint=False, - use_fp16=False, - num_heads=1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - pool="adaptive", - *args, - **kwargs, - ): + def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0.0, context_dim=None): super().__init__() - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.use_checkpoint = use_checkpoint - self.dtype = torch.float16 if use_fp16 else torch.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample + inner_dim = n_heads * d_head + self.norm = Normalize(in_channels) - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), + self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) + + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) + for d in range(depth) + ] ) - self.input_blocks = nn.ModuleList( - [TimestepEmbedSequential(conv_nd(dims, in_channels, model_channels, 3, padding=1))] - ) - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResnetBlock( - in_channels=ch, - out_channels=model_channels * mult, - dropout=dropout, - temb_channels=time_embed_dim, - eps=1e-5, - non_linearity="silu", - overwrite_for_ldm=True, - ), - ] - ch = mult * model_channels - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - # ResBlock( - # ch, - # time_embed_dim, - # dropout, - # out_channels=out_ch, - # dims=dims, - # use_checkpoint=use_checkpoint, - # use_scale_shift_norm=use_scale_shift_norm, - # down=True, - # ) - None - if resblock_updown - else Downsample( - ch, use_conv=conv_resample, dims=dims, out_channels=out_ch, padding=1, name="op" - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch + self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)) - self.middle_block = TimestepEmbedSequential( - ResnetBlock( - in_channels=ch, - out_channels=None, - dropout=dropout, - temb_channels=time_embed_dim, - eps=1e-5, - non_linearity="silu", - overwrite_for_ldm=True, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - 
use_new_attention_order=use_new_attention_order, - ), - ResnetBlock( - in_channels=ch, - out_channels=None, - dropout=dropout, - temb_channels=time_embed_dim, - eps=1e-5, - non_linearity="silu", - overwrite_for_ldm=True, - ), - ) - self._feature_size += ch - self.pool = pool - if pool == "adaptive": - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - nn.AdaptiveAvgPool2d((1, 1)), - zero_module(conv_nd(dims, ch, out_channels, 1)), - nn.Flatten(), - ) - elif pool == "attention": - assert num_head_channels != -1 - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - AttentionPool2d((image_size // ds), ch, num_head_channels, out_channels), - ) - elif pool == "spatial": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - nn.ReLU(), - nn.Linear(2048, self.out_channels), - ) - elif pool == "spatial_v2": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - normalization(2048), - nn.SiLU(), - nn.Linear(2048, self.out_channels), - ) - else: - raise NotImplementedError(f"Unexpected {pool} pooling") + def forward(self, x, context=None): + # note: if no context is given, cross-attention defaults to self-attention + b, c, h, w = x.shape + x_in = x + x = self.norm(x) + x = self.proj_in(x) + x = x.permute(0, 2, 3, 1).reshape(b, h * w, c) + for block in self.transformer_blocks: + x = block(x, context=context) + x = x.reshape(b, h, w, c).permute(0, 3, 1, 2) + x = self.proj_out(x) + return x + x_in - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) +class BasicTransformerBlock(nn.Module): + def __init__(self, dim, n_heads, d_head, dropout=0.0, context_dim=None, gated_ff=True, checkpoint=True): + super().__init__() + self.attn1 = CrossAttention( + query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout + ) # is a self-attention + self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) + self.attn2 = CrossAttention( + query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout + ) # is self-attn if context is none + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + self.norm3 = nn.LayerNorm(dim) + self.checkpoint = checkpoint - def forward(self, x, timesteps): - """ - Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch - of timesteps. :return: an [N x K] Tensor of outputs. 
- """ - emb = self.time_embed( - get_timestep_embedding(timesteps, self.model_channels, flip_sin_to_cos=True, downscale_freq_shift=0) - ) + def forward(self, x, context=None): + x = self.attn1(self.norm1(x)) + x + x = self.attn2(self.norm2(x), context=context) + x + x = self.ff(self.norm3(x)) + x + return x - results = [] - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = self.middle_block(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = torch.cat(results, axis=-1) - return self.out(h) - else: - h = h.type(x.dtype) - return self.out(h) + +class CrossAttention(nn.Module): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0): + super().__init__() + inner_dim = dim_head * heads + context_dim = default(context_dim, query_dim) + + self.scale = dim_head**-0.5 + self.heads = heads + + self.to_q = nn.Linear(query_dim, inner_dim, bias=False) + self.to_k = nn.Linear(context_dim, inner_dim, bias=False) + self.to_v = nn.Linear(context_dim, inner_dim, bias=False) + + self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)) + + def reshape_heads_to_batch_dim(self, tensor): + batch_size, seq_len, dim = tensor.shape + head_size = self.heads + tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size) + tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size) + return tensor + + def reshape_batch_dim_to_heads(self, tensor): + batch_size, seq_len, dim = tensor.shape + head_size = self.heads + tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) + tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) + return tensor + + def forward(self, x, context=None, mask=None): + batch_size, sequence_length, dim = x.shape + + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + + q = self.reshape_heads_to_batch_dim(q) + k = self.reshape_heads_to_batch_dim(k) + v = self.reshape_heads_to_batch_dim(v) + + sim = torch.einsum("b i d, b j d -> b i j", q, k) * self.scale + + if exists(mask): + mask = mask.reshape(batch_size, -1) + max_neg_value = -torch.finfo(sim.dtype).max + mask = mask[:, None, :].repeat(h, 1, 1) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + attn = sim.softmax(dim=-1) + + out = torch.einsum("b i j, b j d -> b i d", attn, v) + out = self.reshape_batch_dim_to_heads(out) + return self.to_out(out) From 5018abff6ef8305c43d6520244f7e8ffb4a28bc3 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Fri, 1 Jul 2022 12:01:59 +0200 Subject: [PATCH 27/32] add fir=False back --- src/diffusers/models/resnet.py | 43 +++++++++++--- .../models/unet_sde_score_estimation.py | 59 +++++++++++-------- 2 files changed, 70 insertions(+), 32 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index f48a94039e..bad14f7e2a 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -1,4 +1,5 @@ from abc import abstractmethod +from functools import partial import numpy as np import torch @@ -78,18 +79,24 @@ class Upsample(nn.Module): upsampling occurs in the inner-two dimensions. 
""" - def __init__(self, channels, use_conv=False, use_conv_transpose=False, dims=2, out_channels=None): + def __init__(self, channels, use_conv=False, use_conv_transpose=False, dims=2, out_channels=None, name="conv"): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims self.use_conv_transpose = use_conv_transpose + name = self.name if use_conv_transpose: - self.conv = conv_transpose_nd(dims, channels, self.out_channels, 4, 2, 1) + conv = conv_transpose_nd(dims, channels, self.out_channels, 4, 2, 1) elif use_conv: - self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1) + conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1) + + if name == "conv": + self.conv = conv + else: + self.Conv2d_0 = conv def forward(self, x): assert x.shape[1] == self.channels @@ -102,7 +109,10 @@ class Upsample(nn.Module): x = F.interpolate(x, scale_factor=2.0, mode="nearest") if self.use_conv: - x = self.conv(x) + if self.name == "conv": + x = self.conv(x) + else: + x = self.Conv2d_0(x) return x @@ -134,6 +144,8 @@ class Downsample(nn.Module): if name == "conv": self.conv = conv + elif name == "Conv2d_0": + self.Conv2d_0 = conv else: self.op = conv @@ -145,6 +157,8 @@ class Downsample(nn.Module): if self.name == "conv": return self.conv(x) + elif self.name == "Conv2d_0": + return self.Conv2d_0(x) else: return self.op(x) @@ -390,6 +404,7 @@ class ResnetBlockBigGANpp(nn.Module): up=False, down=False, dropout=0.1, + fir=False, fir_kernel=(1, 3, 3, 1), skip_rescale=True, init_scale=0.0, @@ -400,8 +415,20 @@ class ResnetBlockBigGANpp(nn.Module): self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6) self.up = up self.down = down + self.fir = fir self.fir_kernel = fir_kernel + if self.up: + if self.fir: + self.upsample = partial(upsample_2d, k=self.fir_kernel, factor=2) + else: + self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest") + elif self.down: + if self.fir: + self.downsample = partial(downsample_2d, k=self.fir_kernel, factor=2) + else: + self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2) + self.Conv_0 = conv2d(in_ch, out_ch, kernel_size=3, padding=1) if temb_dim is not None: self.Dense_0 = nn.Linear(temb_dim, out_ch) @@ -424,11 +451,11 @@ class ResnetBlockBigGANpp(nn.Module): h = self.act(self.GroupNorm_0(x)) if self.up: - h = upsample_2d(h, self.fir_kernel, factor=2) - x = upsample_2d(x, self.fir_kernel, factor=2) + h = self.upsample(h) + x = self.upsample(x) elif self.down: - h = downsample_2d(h, self.fir_kernel, factor=2) - x = downsample_2d(x, self.fir_kernel, factor=2) + h = self.downsample(h) + x = self.downsample(x) h = self.Conv_0(h) # Add bias to each feature map conditioned on the time embedding diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 9c82e53e70..d9a4732f0b 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -17,6 +17,7 @@ import functools import math +from unicodedata import name import numpy as np import torch @@ -27,7 +28,7 @@ from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import GaussianFourierProjection, get_timestep_embedding -from .resnet import ResnetBlockBigGANpp, downsample_2d, upfirdn2d, upsample_2d +from .resnet import Downsample, ResnetBlockBigGANpp, Upsample, 
downsample_2d, upfirdn2d, upsample_2d def _setup_kernel(k): @@ -184,17 +185,17 @@ class Combine(nn.Module): class FirUpsample(nn.Module): - def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir_kernel=(1, 3, 3, 1)): + def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)): super().__init__() - out_ch = out_ch if out_ch else in_ch - if with_conv: - self.Conv2d_0 = Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1) - self.with_conv = with_conv + out_channels = out_channels if out_channels else channels + if use_conv: + self.Conv2d_0 = Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) + self.use_conv = use_conv self.fir_kernel = fir_kernel - self.out_ch = out_ch + self.out_channels = out_channels def forward(self, x): - if self.with_conv: + if self.use_conv: h = _upsample_conv_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) else: h = upsample_2d(x, self.fir_kernel, factor=2) @@ -203,17 +204,17 @@ class FirUpsample(nn.Module): class FirDownsample(nn.Module): - def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir_kernel=(1, 3, 3, 1)): + def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)): super().__init__() - out_ch = out_ch if out_ch else in_ch - if with_conv: - self.Conv2d_0 = self.Conv2d_0 = Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1) + out_channels = out_channels if out_channels else channels + if use_conv: + self.Conv2d_0 = self.Conv2d_0 = Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) self.fir_kernel = fir_kernel - self.with_conv = with_conv - self.out_ch = out_ch + self.use_conv = use_conv + self.out_channels = out_channels def forward(self, x): - if self.with_conv: + if self.use_conv: x = _conv_downsample_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) else: x = downsample_2d(x, self.fir_kernel, factor=2) @@ -234,7 +235,7 @@ class NCSNpp(ModelMixin, ConfigMixin): conv_size=3, dropout=0.0, embedding_type="fourier", - fir=True, # TODO (patil-suraj) remove this option from here and pre-trained model configs + fir=True, fir_kernel=(1, 3, 3, 1), fourier_scale=16, init_scale=0.0, @@ -258,6 +259,7 @@ class NCSNpp(ModelMixin, ConfigMixin): conv_size=conv_size, dropout=dropout, embedding_type=embedding_type, + fir=fir, fir_kernel=fir_kernel, fourier_scale=fourier_scale, init_scale=init_scale, @@ -307,24 +309,33 @@ class NCSNpp(ModelMixin, ConfigMixin): modules.append(Linear(nf * 4, nf * 4)) AttnBlock = functools.partial(AttentionBlock, overwrite_linear=True, rescale_output_factor=math.sqrt(2.0)) - Up_sample = functools.partial(FirUpsample, with_conv=resamp_with_conv, fir_kernel=fir_kernel) + + if self.fir: + Up_sample = functools.partial(FirUpsample, fir_kernel=fir_kernel) + else: + Up_sample = functools.partial(Upsample, name="Conv2d_0") if progressive == "output_skip": - self.pyramid_upsample = Up_sample(fir_kernel=fir_kernel, with_conv=False) + self.pyramid_upsample = Up_sample(channels=None, use_conv=False) elif progressive == "residual": - pyramid_upsample = functools.partial(Up_sample, fir_kernel=fir_kernel, with_conv=True) + pyramid_upsample = functools.partial(Up_sample, use_conv=True) - Down_sample = functools.partial(FirDownsample, with_conv=resamp_with_conv, fir_kernel=fir_kernel) + if self.fir: + Down_sample = functools.partial(FirDownsample, fir_kernel=fir_kernel) + else: + print("fir false") + Down_sample = functools.partial(Downsample, padding=0, name="Conv2d_0") if progressive_input == "input_skip": - 
self.pyramid_downsample = Down_sample(fir_kernel=fir_kernel, with_conv=False) + self.pyramid_downsample = Down_sample(channels=None, use_conv=False) elif progressive_input == "residual": - pyramid_downsample = functools.partial(Down_sample, fir_kernel=fir_kernel, with_conv=True) + pyramid_downsample = functools.partial(Down_sample, use_conv=True) ResnetBlock = functools.partial( ResnetBlockBigGANpp, act=act, dropout=dropout, + fir=fir, fir_kernel=fir_kernel, init_scale=init_scale, skip_rescale=skip_rescale, @@ -361,7 +372,7 @@ class NCSNpp(ModelMixin, ConfigMixin): in_ch *= 2 elif progressive_input == "residual": - modules.append(pyramid_downsample(in_ch=input_pyramid_ch, out_ch=in_ch)) + modules.append(pyramid_downsample(channels=input_pyramid_ch, out_channels=in_ch)) input_pyramid_ch = in_ch hs_c.append(in_ch) @@ -402,7 +413,7 @@ class NCSNpp(ModelMixin, ConfigMixin): ) pyramid_ch = channels elif progressive == "residual": - modules.append(pyramid_upsample(in_ch=pyramid_ch, out_ch=in_ch)) + modules.append(pyramid_upsample(channels=pyramid_ch, out_channels=in_ch)) pyramid_ch = in_ch else: raise ValueError(f"{progressive} is not a valid name") From 0dbc4779c8bf396d48170dda52befc83288e109f Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Fri, 1 Jul 2022 12:50:34 +0200 Subject: [PATCH 28/32] add centered back --- src/diffusers/models/unet_sde_score_estimation.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index d9a4732f0b..1c2a2d10ff 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -229,6 +229,7 @@ class NCSNpp(ModelMixin, ConfigMixin): self, image_size=1024, num_channels=3, + centered=False, attn_resolutions=(16,), ch_mult=(1, 2, 4, 8, 16, 32, 32, 32), conditional=True, @@ -253,6 +254,7 @@ class NCSNpp(ModelMixin, ConfigMixin): self.register_to_config( image_size=image_size, num_channels=num_channels, + centered=centered, attn_resolutions=attn_resolutions, ch_mult=ch_mult, conditional=conditional, @@ -457,7 +459,8 @@ class NCSNpp(ModelMixin, ConfigMixin): temb = None # If input data is in [0, 1] - x = 2 * x - 1.0 + if not self.config.centered: + x = 2 * x - 1.0 # Downsampling block input_pyramid = None From db5a05742e06d99665797036f34a0e71d0b6ec87 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Fri, 1 Jul 2022 12:54:47 +0200 Subject: [PATCH 29/32] fix typo --- src/diffusers/models/resnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index bad14f7e2a..ba0f9e819b 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -86,7 +86,7 @@ class Upsample(nn.Module): self.use_conv = use_conv self.dims = dims self.use_conv_transpose = use_conv_transpose - name = self.name + self.name = self.name if use_conv_transpose: conv = conv_transpose_nd(dims, channels, self.out_channels, 4, 2, 1) From 60a981343ef5b805c5860920bd306d303cdef7b7 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Fri, 1 Jul 2022 12:55:30 +0200 Subject: [PATCH 30/32] actually fix the typo --- src/diffusers/models/resnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index ba0f9e819b..4983016cf1 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -86,7 +86,7 @@ class Upsample(nn.Module): self.use_conv = 
use_conv self.dims = dims self.use_conv_transpose = use_conv_transpose - self.name = self.name + self.name = name if use_conv_transpose: conv = conv_transpose_nd(dims, channels, self.out_channels, 4, 2, 1) From 516cb9e7f88564fb150d454371a0750904e302f7 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Fri, 1 Jul 2022 12:58:50 +0200 Subject: [PATCH 31/32] fix Upsample --- src/diffusers/models/resnet.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 4983016cf1..d80ecd88b0 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -88,6 +88,7 @@ class Upsample(nn.Module): self.use_conv_transpose = use_conv_transpose self.name = name + conv = None if use_conv_transpose: conv = conv_transpose_nd(dims, channels, self.out_channels, 4, 2, 1) elif use_conv: From 4c293e0e1b77bc0665463b39056d2302e27768e8 Mon Sep 17 00:00:00 2001 From: patil-suraj Date: Fri, 1 Jul 2022 13:54:33 +0200 Subject: [PATCH 32/32] fix bias when using fir up/down sample --- src/diffusers/models/unet_sde_score_estimation.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/diffusers/models/unet_sde_score_estimation.py b/src/diffusers/models/unet_sde_score_estimation.py index 1c2a2d10ff..7e368b8763 100644 --- a/src/diffusers/models/unet_sde_score_estimation.py +++ b/src/diffusers/models/unet_sde_score_estimation.py @@ -17,7 +17,6 @@ import functools import math -from unicodedata import name import numpy as np import torch @@ -197,6 +196,7 @@ class FirUpsample(nn.Module): def forward(self, x): if self.use_conv: h = _upsample_conv_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) + h = h + self.Conv2d_0.bias.reshape(1, -1, 1, 1) else: h = upsample_2d(x, self.fir_kernel, factor=2) @@ -216,6 +216,7 @@ class FirDownsample(nn.Module): def forward(self, x): if self.use_conv: x = _conv_downsample_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) + x = x + self.Conv2d_0.bias.reshape(1, -1, 1, 1) else: x = downsample_2d(x, self.fir_kernel, factor=2) @@ -313,7 +314,7 @@ class NCSNpp(ModelMixin, ConfigMixin): AttnBlock = functools.partial(AttentionBlock, overwrite_linear=True, rescale_output_factor=math.sqrt(2.0)) if self.fir: - Up_sample = functools.partial(FirUpsample, fir_kernel=fir_kernel) + Up_sample = functools.partial(FirUpsample, fir_kernel=fir_kernel, use_conv=resamp_with_conv) else: Up_sample = functools.partial(Upsample, name="Conv2d_0") @@ -323,9 +324,8 @@ class NCSNpp(ModelMixin, ConfigMixin): pyramid_upsample = functools.partial(Up_sample, use_conv=True) if self.fir: - Down_sample = functools.partial(FirDownsample, fir_kernel=fir_kernel) + Down_sample = functools.partial(FirDownsample, fir_kernel=fir_kernel, use_conv=resamp_with_conv) else: - print("fir false") Down_sample = functools.partial(Downsample, padding=0, name="Conv2d_0") if progressive_input == "input_skip":
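The bias handling in [PATCH 32/32] is needed because the FIR helpers consume only the convolution's weight tensor: _upsample_conv_2d and _conv_downsample_2d fuse Conv2d_0.weight into the upfirdn2d resampling, so Conv2d_0.bias never enters the computation and has to be re-added by hand, broadcast over the channel axis. A minimal sketch of the failure mode and the fix; naive_fir_upsample is a hypothetical stand-in for the real helper, not a library function:

import torch
import torch.nn as nn
import torch.nn.functional as F


def naive_fir_upsample(x, weight):
    # Hypothetical stand-in for _upsample_conv_2d: like the real helper, it
    # consumes only the convolution *weight*, never the bias.
    x = F.interpolate(x, scale_factor=2.0, mode="nearest")
    return F.conv2d(x, weight, bias=None, padding=1)


conv = nn.Conv2d(8, 8, kernel_size=3, padding=1)
x = torch.randn(1, 8, 16, 16)

h = naive_fir_upsample(x, conv.weight)  # the module's bias is silently dropped here
h = h + conv.bias.reshape(1, -1, 1, 1)  # re-apply it, as the patch does
assert h.shape == (1, 8, 32, 32)

The same reshape(1, -1, 1, 1) broadcast is applied in FirDownsample: whenever a module's weight is routed through a functional helper, the bias has to be reattached explicitly.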