mirror of https://github.com/huggingface/diffusers.git synced 2026-01-27 17:22:53 +03:00

Remove unnecessary lines (#8569)

* Remove unused line


---------

Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
Tolga Cangöz authored 2024-07-08 23:42:02 +03:00, committed by GitHub
parent 70611a1068
commit 57084dacc5
11 changed files with 3 additions and 21 deletions
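
Every hunk below deletes either a bare self-assignment (`name = name`) or the redundant first target of a chained assignment (`name = name = expr`). As a minimal sketch, with hypothetical names that are not from the diff, both patterns are behavior-preserving no-ops in Python:

def compute():
    return [1, 2, 3]

# Chained assignment evaluates the right-hand side once and binds every
# target to the same object, so the extra "x =" adds nothing.
x = x = compute()
y = compute()
assert x == y

# A bare self-assignment rebinds the name to the object it already holds,
# so removing it cannot change program behavior.
z = compute()
before = id(z)
z = z
assert id(z) == before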


@@ -467,8 +467,6 @@ def make_emblist(self, prompts):
 def split_dims(xs, height, width):
-    xs = xs
     def repeat_div(x, y):
         while y > 0:
             x = math.ceil(x / 2)
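
The hunk truncates the helper it touches; a short, self-contained sketch of the full loop follows. The trailing `y = y - 1` and `return x` are not shown above and are reconstructed here as an assumption:

import math

# Hypothetical completion of repeat_div for illustration: halve x (rounding
# up) y times, the usual way a size is derived from y downsampling steps.
def repeat_div(x, y):
    while y > 0:
        x = math.ceil(x / 2)
        y = y - 1
    return x

assert repeat_div(64, 3) == 8   # 64 -> 32 -> 16 -> 8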


@@ -1112,9 +1112,7 @@ class FusedJointAttnProcessor2_0:
         key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
         value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
-        hidden_states = hidden_states = F.scaled_dot_product_attention(
-            query, key, value, dropout_p=0.0, is_causal=False
-        )
+        hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)
         hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
         hidden_states = hidden_states.to(query.dtype)
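
For context on what the surviving lines do, here is a minimal sketch with toy shapes of the reshape → scaled_dot_product_attention → reshape sequence in this processor; the dimensions are illustrative, not taken from the model:

import torch
import torch.nn.functional as F

# Toy dimensions standing in for attn.heads / head_dim.
batch_size, heads, seq_len, head_dim = 2, 4, 8, 16

# Projections arrive as (batch, seq, heads * head_dim) ...
query = torch.randn(batch_size, seq_len, heads * head_dim)
key = torch.randn(batch_size, seq_len, heads * head_dim)
value = torch.randn(batch_size, seq_len, heads * head_dim)

# ... and are split per head before attention, as in the hunk above.
q = query.view(batch_size, -1, heads, head_dim).transpose(1, 2)
k = key.view(batch_size, -1, heads, head_dim).transpose(1, 2)
v = value.view(batch_size, -1, heads, head_dim).transpose(1, 2)

hidden_states = F.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False)
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, heads * head_dim)
assert hidden_states.shape == (batch_size, seq_len, heads * head_dim)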


@@ -308,8 +308,6 @@ class SD3ControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginal
"Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
)
height, width = hidden_states.shape[-2:]
hidden_states = self.pos_embed(hidden_states) # takes care of adding positional embeddings too.
temb = self.time_text_embed(timestep, pooled_projections)
encoder_hidden_states = self.context_embedder(encoder_hidden_states)


@@ -478,9 +478,7 @@ class StableCascadeUNet(ModelMixin, ConfigMixin, FromOriginalModelMixin):
                                 create_custom_forward(block), x, r_embed, use_reentrant=False
                             )
                         else:
-                            x = x = torch.utils.checkpoint.checkpoint(
-                                create_custom_forward(block), use_reentrant=False
-                            )
+                            x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), use_reentrant=False)
                     if i < len(repmap):
                         x = repmap[i](x)
                 level_outputs.insert(0, x)
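
A minimal sketch of the `torch.utils.checkpoint.checkpoint(..., use_reentrant=False)` pattern used here, with a toy block and a stand-in `create_custom_forward` closure; both are hypothetical, not the UNet's own:

import torch
import torch.utils.checkpoint

# Stand-in for the closure used in the UNet: wraps a module so that
# checkpoint() can call it with arbitrary positional inputs.
def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

block = torch.nn.Linear(8, 8)
x = torch.randn(2, 8, requires_grad=True)

# Activations inside `block` are not stored; they are recomputed on backward.
out = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False)
out.sum().backward()
assert x.grad is not None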


@@ -661,7 +661,6 @@ class SemanticStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
                     noise_guidance_edit_tmp = torch.einsum(
                         "cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp
                     )
-                    noise_guidance_edit_tmp = noise_guidance_edit_tmp
                     noise_guidance = noise_guidance + noise_guidance_edit_tmp
                     self.sem_guidance[i] = noise_guidance_edit_tmp.detach().cpu()
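
The einsum in this hunk combines a per-concept weight matrix with per-concept guidance maps; a small sketch with toy shapes (the dimension names are assumptions):

import torch

# Toy shapes: c concepts, b batch items, and a (channels, height, width) map.
concepts, batch, channels, height, width = 3, 2, 4, 8, 8
concept_weights_tmp = torch.rand(concepts, batch)
noise_guidance_edit_tmp = torch.randn(concepts, batch, channels, height, width)

# "cb,cbijk->bijk": weight each concept's map and sum over the concept axis.
combined = torch.einsum("cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp)
assert combined.shape == (batch, channels, height, width)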


@@ -153,7 +153,6 @@ class SD3LoRATests(unittest.TestCase):
         pipe = self.pipeline_class(**components)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
-        inputs = self.get_dummy_inputs(torch_device)
         pipe.transformer.add_adapter(transformer_config)
         self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")


@@ -144,9 +144,6 @@ class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
 class PriorTransformerIntegrationTests(unittest.TestCase):
     def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
         torch.manual_seed(seed)
-        batch_size = batch_size
-        embedding_dim = embedding_dim
-        num_embeddings = num_embeddings
         hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
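
With the self-assignments gone, the helper just seeds the RNG and draws random tensors. A minimal sketch (simplified signature, CPU only) of why the seeding makes the dummy input reproducible:

import torch

def get_dummy_seed_input(batch_size=1, embedding_dim=768, seed=0):
    # Seeding the global RNG makes every subsequent torch.randn deterministic.
    torch.manual_seed(seed)
    return torch.randn((batch_size, embedding_dim))

a = get_dummy_seed_input()
b = get_dummy_seed_input()
assert torch.equal(a, b)   # same seed -> identical dummy tensors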


@@ -142,7 +142,7 @@ class StableDiffusionAttendAndExcitePipelineFastTests(
             generator = torch.manual_seed(seed)
         else:
             generator = torch.Generator(device=device).manual_seed(seed)
-        inputs = inputs = {
+        inputs = {
             "prompt": "a cat and a frog",
             "token_indices": [2, 5],
             "generator": generator,


@@ -538,7 +538,6 @@ class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterM
         # batchify inputs
         batched_inputs = {}
-        batch_size = batch_size
         for name, value in inputs.items():
             if name in self.batch_params:
                 # prompt is string


@@ -574,7 +574,6 @@ class StableDiffusionXLMultiAdapterPipelineFastTests(
         # batchify inputs
         batched_inputs = {}
-        batch_size = batch_size
         for name, value in inputs.items():
             if name in self.batch_params:
                 # prompt is string


@@ -89,9 +89,6 @@ class EDMEulerSchedulerTest(SchedulerCommonTest):
         scheduler_config = self.get_scheduler_config()
         scheduler = scheduler_class(**scheduler_config)
-        sample = self.dummy_sample
-        residual = 0.1 * sample
         with tempfile.TemporaryDirectory() as tmpdirname:
             scheduler.save_config(tmpdirname)
             new_scheduler = scheduler_class.from_pretrained(tmpdirname)
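
The surrounding test only checks that a scheduler's config survives a save/load round trip, which never needed the dummy sample. A minimal sketch of that round trip, simplified to use EDMEulerScheduler directly rather than the parametrized scheduler_class above:

import tempfile
from diffusers import EDMEulerScheduler

scheduler = EDMEulerScheduler()
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)                      # writes scheduler_config.json
    new_scheduler = EDMEulerScheduler.from_pretrained(tmpdirname)

# The reloaded scheduler carries the same configuration values.
assert new_scheduler.config.sigma_max == scheduler.config.sigma_max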