diff --git a/src/diffusers/models/adapter.py b/src/diffusers/models/adapter.py
index bc0c58dc29..0a79995cb4 100644
--- a/src/diffusers/models/adapter.py
+++ b/src/diffusers/models/adapter.py
@@ -136,7 +136,6 @@ class FullAdapter(nn.Module):
         downscale_factor: int = 8,
     ):
         super().__init__()
-        print(f"From {self.__class__} channels: {channels}.")
 
         in_channels = in_channels * downscale_factor**2
 
@@ -164,7 +163,7 @@ class FullAdapter(nn.Module):
 
         for block in self.body:
             x = block(x)
             features.append(x)
-        print(f"Number of features: {len(features)}")
+
 
         return features
@@ -293,7 +292,7 @@ class LightAdapter(nn.Module):
 
         for block in self.body:
             x = block(x)
             features.append(x)
-        print(f"Number of features: {len(features)}")
+
 
         return features
diff --git a/src/diffusers/models/unet_2d_condition.py b/src/diffusers/models/unet_2d_condition.py
index f041bf56ae..5695429489 100644
--- a/src/diffusers/models/unet_2d_condition.py
+++ b/src/diffusers/models/unet_2d_condition.py
@@ -920,7 +920,6 @@ class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin)
         is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None
 
         down_block_res_samples = (sample,)
-        print(f"From UNet before down blocks: {len(down_block_additional_residuals)}")
         for downsample_block in self.down_blocks:
             if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
                 # For t2i-adapter CrossAttnDownBlock2D
@@ -967,7 +966,6 @@ class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin)
                 encoder_attention_mask=encoder_attention_mask,
             )
 
         # To support T2I-Adapter-XL
-        print(f"From UNet in mid block: {len(down_block_additional_residuals)}")
         if is_adapter and len(down_block_additional_residuals) > 0:
             sample += down_block_additional_residuals.pop(0)
diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py
index 3f7e19c754..4aa911198a 100644
--- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py
+++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py
@@ -711,7 +711,6 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline):
 
         # 7. Denoising loop
         adapter_state = self.adapter(adapter_input)
-        print(f"From pipeline (before rejigging): {len(adapter_state)}.")
        for k, v in enumerate(adapter_state):
             adapter_state[k] = v * adapter_conditioning_scale
         if num_images_per_prompt > 1:
@@ -720,7 +719,6 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline):
         if do_classifier_free_guidance:
             for k, v in enumerate(adapter_state):
                 adapter_state[k] = torch.cat([v] * 2, dim=0)
-        print(f"From pipeline (after rejigging): {len(adapter_state)}.")
 
         num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
         with self.progress_bar(total=num_inference_steps) as progress_bar:
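
The prints removed above were tracing how many feature maps the adapter returns and how `adapter_state` is scaled, repeated, and duplicated for classifier-free guidance before the UNet consumes it. For reference, a minimal sketch of exercising this exact path end to end; the checkpoint IDs and the control-image path below are illustrative placeholders, not part of this diff:

```python
# Minimal usage sketch for the StableDiffusionAdapterPipeline path touched above.
import torch
from PIL import Image

from diffusers import StableDiffusionAdapterPipeline, T2IAdapter

# FullAdapter/LightAdapter sit behind T2IAdapter; their forward() returns the
# list of per-resolution features that the pipeline holds as `adapter_state`.
adapter = T2IAdapter.from_pretrained(
    "TencentARC/t2iadapter_canny_sd15v2", torch_dtype=torch.float16
)
pipe = StableDiffusionAdapterPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", adapter=adapter, torch_dtype=torch.float16
).to("cuda")

control = Image.open("canny_edges.png")  # placeholder control image
image = pipe(
    "a photo of a house",
    image=control,
    num_inference_steps=25,
    # Scales each entry of adapter_state, as in the "rejigging" loop above.
    adapter_conditioning_scale=1.0,
).images[0]
image.save("out.png")
```

With the prints gone, the only observable behavior is the residuals being added to the UNet's down-block (and, for T2I-Adapter-XL, mid-block) activations; the forward passes themselves are unchanged.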