
Add a docstring for the AutoencoderKL's encode (#5239)

* Add docstring for the AutoencoderKL's encode

#5229

* Support Python 3.8 syntax in AutoencoderKL.decode type hints

Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>

* Follow the style guidelines in AutoencoderKL's encode

#5230

---------

Co-authored-by: stano <>
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
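The "Python 3.8 syntax" bullet above refers to spelling the return annotation with typing.Union/typing.Tuple rather than the PEP 604 `X | Y` form, which only parses on Python 3.10+. A minimal sketch of the distinction (the bare function below is illustrative, not code from the commit):

    from typing import Tuple, Union

    # Works on Python 3.8: unions and generic tuples come from the typing module.
    def encode_38(x) -> Union[int, Tuple[int]]:
        return x

    # Only parses on Python 3.10+: PEP 604 unions and built-in generics.
    # def encode_310(x) -> int | tuple[int]:
    #     return x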
Author: stano
Date: 2023-10-02 20:17:34 +03:00
Committed by: GitHub
Parent: 37a787a106
Commit: 7a4324cce3


@@ -249,7 +249,21 @@ class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalVAEMixin):
         self.set_attn_processor(processor, _remove_lora=True)
 
     @apply_forward_hook
-    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
+    def encode(
+        self, x: torch.FloatTensor, return_dict: bool = True
+    ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
+        """
+        Encode a batch of images into latents.
+
+        Args:
+            x (`torch.FloatTensor`): Input batch of images.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
+
+        Returns:
+                The latent representations of the encoded images. If `return_dict` is True, a
+                [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
+        """
         if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
             return self.tiled_encode(x, return_dict=return_dict)
 
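For context, a minimal usage sketch of the documented method under both values of `return_dict` (the checkpoint name and tensor shape are illustrative assumptions, not part of the commit):

    import torch
    from diffusers import AutoencoderKL

    # Load only the VAE from a Stable Diffusion checkpoint; the model id is an
    # illustrative assumption.
    vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")

    x = torch.randn(1, 3, 256, 256)  # dummy image batch scaled to [-1, 1]

    with torch.no_grad():
        # Default path: an AutoencoderKLOutput whose `latent_dist` field is the
        # DiagonalGaussianDistribution over the latents.
        posterior = vae.encode(x).latent_dist
        latents = posterior.sample()

        # With return_dict=False, the same distribution arrives in a plain tuple,
        # matching the Union[...] annotation introduced by this commit.
        (latent_dist,) = vae.encode(x, return_dict=False)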