diff --git a/src/diffusers/models/autoencoders/vae.py b/src/diffusers/models/autoencoders/vae.py
index 7b17196125..9c6031a988 100644
--- a/src/diffusers/models/autoencoders/vae.py
+++ b/src/diffusers/models/autoencoders/vae.py
@@ -286,11 +286,9 @@ class Decoder(nn.Module):
 
         sample = self.conv_in(sample)
 
-        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
         if torch.is_grad_enabled() and self.gradient_checkpointing:
             # middle
             sample = self._gradient_checkpointing_func(self.mid_block, sample, latent_embeds)
-            sample = sample.to(upscale_dtype)
 
             # up
             for up_block in self.up_blocks:
@@ -298,7 +296,6 @@ class Decoder(nn.Module):
         else:
             # middle
             sample = self.mid_block(sample, latent_embeds)
-            sample = sample.to(upscale_dtype)
 
             # up
             for up_block in self.up_blocks:
diff --git a/src/diffusers/modular_pipelines/modular_pipeline.py b/src/diffusers/modular_pipelines/modular_pipeline.py
index bf067555a8..55c261ab2f 100644
--- a/src/diffusers/modular_pipelines/modular_pipeline.py
+++ b/src/diffusers/modular_pipelines/modular_pipeline.py
@@ -335,7 +335,7 @@ class ModularPipelineBlocks(ConfigMixin, PushToHubMixin):
         )
         expected_kwargs, optional_kwargs = block_cls._get_signature_keys(block_cls)
         block_kwargs = {
-            name: kwargs.pop(name) for name in kwargs if name in expected_kwargs or name in optional_kwargs
+            name: kwargs.get(name) for name in kwargs if name in expected_kwargs or name in optional_kwargs
         }
         return block_cls(**block_kwargs)
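
A minimal sketch (not part of the patch) of why the `modular_pipeline.py` hunk swaps `kwargs.pop(name)` for `kwargs.get(name)`: popping keys out of `kwargs` while the comprehension is still iterating over it mutates the dict mid-iteration, which CPython rejects with `RuntimeError: dictionary changed size during iteration`. The values below are hypothetical stand-ins for the real `ModularPipelineBlocks` inputs.

```python
# Hypothetical stand-ins for the kwarg filtering done in ModularPipelineBlocks.
expected_kwargs = {"scheduler", "guider"}
optional_kwargs = {"dtype"}

def make_kwargs():
    return {"scheduler": "ddim", "dtype": "float16", "unused": None}

# Old behavior: kwargs.pop() shrinks the dict while the comprehension is iterating
# over it, so CPython aborts the loop.
kwargs = make_kwargs()
try:
    block_kwargs = {
        name: kwargs.pop(name) for name in kwargs if name in expected_kwargs or name in optional_kwargs
    }
except RuntimeError as err:
    print(err)  # dictionary changed size during iteration

# Patched behavior: kwargs.get() only reads, so iteration is safe and kwargs is left untouched.
kwargs = make_kwargs()
block_kwargs = {
    name: kwargs.get(name) for name in kwargs if name in expected_kwargs or name in optional_kwargs
}
print(block_kwargs)  # {'scheduler': 'ddim', 'dtype': 'float16'}
```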