From 37a787a106ff48e16ba64fa2df0c8fb446a6056e Mon Sep 17 00:00:00 2001
From: stano
Date: Mon, 2 Oct 2023 19:42:32 +0300
Subject: [PATCH] Add docstring for the AutoencoderKL's decode (#5242)

* Add docstring for the AutoencoderKL's decode #5230

* Follow the style guidelines in AutoencoderKL's decode #5230

---------

Co-authored-by: stano <>
---
 src/diffusers/models/autoencoder_kl.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/src/diffusers/models/autoencoder_kl.py b/src/diffusers/models/autoencoder_kl.py
index 21c8f64fd9..7e3b925df7 100644
--- a/src/diffusers/models/autoencoder_kl.py
+++ b/src/diffusers/models/autoencoder_kl.py
@@ -281,6 +281,20 @@ class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalVAEMixin):
 
     @apply_forward_hook
     def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
+        """
+        Decode a batch of images.
+
+        Args:
+            z (`torch.FloatTensor`): Input batch of latent vectors.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether to return a [`~models.vae.DecoderOutput`] instead of a plain `tuple`.
+
+        Returns:
+            [`~models.vae.DecoderOutput`] or `tuple`:
+                If `return_dict` is `True`, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
+                returned.
+
+        """
         if self.use_slicing and z.shape[0] > 1:
             decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
             decoded = torch.cat(decoded_slices)