From 57084dacc5275d7212513b24837b60a28e55603d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tolga=20Cang=C3=B6z?= <46008593+tolgacangoz@users.noreply.github.com>
Date: Mon, 8 Jul 2024 23:42:02 +0300
Subject: [PATCH] Remove unnecessary lines (#8569)

* Remove unused line

---------

Co-authored-by: Sayak Paul
---
 examples/community/regional_prompting_stable_diffusion.py | 2 --
 src/diffusers/models/attention_processor.py | 4 +---
 src/diffusers/models/controlnet_sd3.py | 2 --
 src/diffusers/models/unets/unet_stable_cascade.py | 4 +---
 .../pipeline_semantic_stable_diffusion.py | 1 -
 tests/lora/test_lora_layers_sd3.py | 1 -
 tests/models/transformers/test_models_prior.py | 3 ---
 .../test_stable_diffusion_attend_and_excite.py | 2 +-
 .../stable_diffusion_adapter/test_stable_diffusion_adapter.py | 1 -
 .../stable_diffusion_xl/test_stable_diffusion_xl_adapter.py | 1 -
 tests/schedulers/test_scheduler_edm_euler.py | 3 ---
 11 files changed, 3 insertions(+), 21 deletions(-)

diff --git a/examples/community/regional_prompting_stable_diffusion.py b/examples/community/regional_prompting_stable_diffusion.py
index 19715a4fb6..cad71338fa 100644
--- a/examples/community/regional_prompting_stable_diffusion.py
+++ b/examples/community/regional_prompting_stable_diffusion.py
@@ -467,8 +467,6 @@ def make_emblist(self, prompts):


 def split_dims(xs, height, width):
-    xs = xs
-
     def repeat_div(x, y):
         while y > 0:
             x = math.ceil(x / 2)
diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py
index ac773ba481..ef25d24f9f 100644
--- a/src/diffusers/models/attention_processor.py
+++ b/src/diffusers/models/attention_processor.py
@@ -1112,9 +1112,7 @@ class FusedJointAttnProcessor2_0:
         key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
         value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

-        hidden_states = hidden_states = F.scaled_dot_product_attention(
-            query, key, value, dropout_p=0.0, is_causal=False
-        )
+        hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)

         hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
         hidden_states = hidden_states.to(query.dtype)
diff --git a/src/diffusers/models/controlnet_sd3.py b/src/diffusers/models/controlnet_sd3.py
index 2b4dd0fa8b..25eb6384c6 100644
--- a/src/diffusers/models/controlnet_sd3.py
+++ b/src/diffusers/models/controlnet_sd3.py
@@ -308,8 +308,6 @@ class SD3ControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginal
                 "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
             )

-        height, width = hidden_states.shape[-2:]
-
         hidden_states = self.pos_embed(hidden_states)  # takes care of adding positional embeddings too.
         temb = self.time_text_embed(timestep, pooled_projections)
         encoder_hidden_states = self.context_embedder(encoder_hidden_states)
diff --git a/src/diffusers/models/unets/unet_stable_cascade.py b/src/diffusers/models/unets/unet_stable_cascade.py
index 75a3dbc8ed..7deea9a714 100644
--- a/src/diffusers/models/unets/unet_stable_cascade.py
+++ b/src/diffusers/models/unets/unet_stable_cascade.py
@@ -478,9 +478,7 @@ class StableCascadeUNet(ModelMixin, ConfigMixin, FromOriginalModelMixin):
                                 create_custom_forward(block), x, r_embed, use_reentrant=False
                             )
                         else:
-                            x = x = torch.utils.checkpoint.checkpoint(
-                                create_custom_forward(block), use_reentrant=False
-                            )
+                            x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), use_reentrant=False)
                         if i < len(repmap):
                             x = repmap[i](x)
                 level_outputs.insert(0, x)
diff --git a/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py b/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
index 8f620b6432..6f83071f3e 100644
--- a/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
+++ b/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
@@ -661,7 +661,6 @@ class SemanticStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
                     noise_guidance_edit_tmp = torch.einsum(
                         "cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp
                     )
-                    noise_guidance_edit_tmp = noise_guidance_edit_tmp
                     noise_guidance = noise_guidance + noise_guidance_edit_tmp

                     self.sem_guidance[i] = noise_guidance_edit_tmp.detach().cpu()
diff --git a/tests/lora/test_lora_layers_sd3.py b/tests/lora/test_lora_layers_sd3.py
index 2d4069d720..48d0b9d8a5 100644
--- a/tests/lora/test_lora_layers_sd3.py
+++ b/tests/lora/test_lora_layers_sd3.py
@@ -153,7 +153,6 @@ class SD3LoRATests(unittest.TestCase):
         pipe = self.pipeline_class(**components)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
-        inputs = self.get_dummy_inputs(torch_device)

         pipe.transformer.add_adapter(transformer_config)
         self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")
diff --git a/tests/models/transformers/test_models_prior.py b/tests/models/transformers/test_models_prior.py
index d10ee17f3f..d2ed10dfa1 100644
--- a/tests/models/transformers/test_models_prior.py
+++ b/tests/models/transformers/test_models_prior.py
@@ -144,9 +144,6 @@ class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
 class PriorTransformerIntegrationTests(unittest.TestCase):
     def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
         torch.manual_seed(seed)
-        batch_size = batch_size
-        embedding_dim = embedding_dim
-        num_embeddings = num_embeddings

         hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)

diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
index 3c60a15ea3..4c2b3a3c1e 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
@@ -142,7 +142,7 @@ class StableDiffusionAttendAndExcitePipelineFastTests(
             generator = torch.manual_seed(seed)
         else:
             generator = torch.Generator(device=device).manual_seed(seed)
-        inputs = inputs = {
+        inputs = {
             "prompt": "a cat and a frog",
             "token_indices": [2, 5],
             "generator": generator,
diff --git a/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py b/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py
index 78b36d71dd..678bffcefa 100644
--- a/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py
+++ b/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py
@@ -538,7 +538,6 @@ class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterM

         # batchify inputs
         batched_inputs = {}
-        batch_size = batch_size
         for name, value in inputs.items():
             if name in self.batch_params:
                 # prompt is string
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
index 416703d231..7093ed46d0 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
@@ -574,7 +574,6 @@ class StableDiffusionXLMultiAdapterPipelineFastTests(

         # batchify inputs
         batched_inputs = {}
-        batch_size = batch_size
         for name, value in inputs.items():
             if name in self.batch_params:
                 # prompt is string
diff --git a/tests/schedulers/test_scheduler_edm_euler.py b/tests/schedulers/test_scheduler_edm_euler.py
index a2f6fd9bad..acac4b1f4c 100644
--- a/tests/schedulers/test_scheduler_edm_euler.py
+++ b/tests/schedulers/test_scheduler_edm_euler.py
@@ -89,9 +89,6 @@ class EDMEulerSchedulerTest(SchedulerCommonTest):
         scheduler_config = self.get_scheduler_config()
         scheduler = scheduler_class(**scheduler_config)

-        sample = self.dummy_sample
-        residual = 0.1 * sample
-
         with tempfile.TemporaryDirectory() as tmpdirname:
             scheduler.save_config(tmpdirname)
             new_scheduler = scheduler_class.from_pretrained(tmpdirname)