From 97ef5e0665f7da889c1626035220fa3cf2ebcea8 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Fri, 27 Jan 2023 16:52:04 +0200
Subject: [PATCH] make style

---
 .../textual_inversion/textual_inversion_bf16.py        | 5 +----
 examples/textual_inversion/textual_inversion.py        | 5 +----
 examples/textual_inversion/textual_inversion_flax.py   | 5 +----
 src/diffusers/models/attention.py                      | 6 ++----
 src/diffusers/models/cross_attention.py                | 6 ++----
 .../schedulers/scheduling_euler_ancestral_discrete.py  | 8 +++-----
 src/diffusers/schedulers/scheduling_euler_discrete.py  | 8 +++-----
 tests/test_scheduler.py                                | 6 ++----
 8 files changed, 15 insertions(+), 34 deletions(-)

diff --git a/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py b/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py
index f20db249ec..a1aff1d91c 100644
--- a/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py
+++ b/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py
@@ -336,10 +336,7 @@ class TextualInversionDataset(Dataset):
 
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            (
-                h,
-                w,
-            ) = (
+            (h, w,) = (
                 img.shape[0],
                 img.shape[1],
             )
diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py
index 1c4bd235e6..d6c27444e0 100644
--- a/examples/textual_inversion/textual_inversion.py
+++ b/examples/textual_inversion/textual_inversion.py
@@ -432,10 +432,7 @@ class TextualInversionDataset(Dataset):
 
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            (
-                h,
-                w,
-            ) = (
+            (h, w,) = (
                 img.shape[0],
                 img.shape[1],
             )
diff --git a/examples/textual_inversion/textual_inversion_flax.py b/examples/textual_inversion/textual_inversion_flax.py
index b37d1e2ac4..69637c3645 100644
--- a/examples/textual_inversion/textual_inversion_flax.py
+++ b/examples/textual_inversion/textual_inversion_flax.py
@@ -306,10 +306,7 @@ class TextualInversionDataset(Dataset):
 
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            (
-                h,
-                w,
-            ) = (
+            (h, w,) = (
                 img.shape[0],
                 img.shape[1],
             )
diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py
index b5acd6f4f9..ec5d6fdb38 100644
--- a/src/diffusers/models/attention.py
+++ b/src/diffusers/models/attention.py
@@ -94,10 +94,8 @@ class AttentionBlock(nn.Module):
         if use_memory_efficient_attention_xformers:
             if not is_xformers_available():
                 raise ModuleNotFoundError(
-                    (
-                        "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
-                        " xformers"
-                    ),
+                    "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
+                    " xformers",
                     name="xformers",
                 )
             elif not torch.cuda.is_available():
diff --git a/src/diffusers/models/cross_attention.py b/src/diffusers/models/cross_attention.py
index a1d77f66ef..5f0777327d 100644
--- a/src/diffusers/models/cross_attention.py
+++ b/src/diffusers/models/cross_attention.py
@@ -111,10 +111,8 @@ class CrossAttention(nn.Module):
                 )
             elif not is_xformers_available():
                 raise ModuleNotFoundError(
-                    (
-                        "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
-                        " xformers"
-                    ),
+                    "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
+                    " xformers",
                     name="xformers",
                 )
             elif not torch.cuda.is_available():
diff --git a/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py b/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py
index 45f939aafe..de5203e793 100644
--- a/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py
+++ b/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py
@@ -189,11 +189,9 @@ class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin):
             or isinstance(timestep, torch.LongTensor)
         ):
             raise ValueError(
-                (
-                    "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
-                    " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass"
-                    " one of the `scheduler.timesteps` as a timestep."
-                ),
+                "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
+                " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass"
+                " one of the `scheduler.timesteps` as a timestep.",
             )
 
         if not self.is_scale_input_called:
diff --git a/src/diffusers/schedulers/scheduling_euler_discrete.py b/src/diffusers/schedulers/scheduling_euler_discrete.py
index 02e5c2cd99..32adf5a61c 100644
--- a/src/diffusers/schedulers/scheduling_euler_discrete.py
+++ b/src/diffusers/schedulers/scheduling_euler_discrete.py
@@ -198,11 +198,9 @@ class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
             or isinstance(timestep, torch.LongTensor)
         ):
             raise ValueError(
-                (
-                    "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
-                    " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass"
-                    " one of the `scheduler.timesteps` as a timestep."
-                ),
+                "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
+                " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass"
+                " one of the `scheduler.timesteps` as a timestep.",
             )
 
         if not self.is_scale_input_called:
diff --git a/tests/test_scheduler.py b/tests/test_scheduler.py
index d49d599c57..9d74cac698 100755
--- a/tests/test_scheduler.py
+++ b/tests/test_scheduler.py
@@ -537,10 +537,8 @@ class SchedulerCommonTest(unittest.TestCase):
             )
             self.assertTrue(
                 hasattr(scheduler, "scale_model_input"),
-                (
-                    f"{scheduler_class} does not implement a required class method `scale_model_input(sample,"
-                    " timestep)`"
-                ),
+                f"{scheduler_class} does not implement a required class method `scale_model_input(sample,"
+                " timestep)`",
             )
             self.assertTrue(
                 hasattr(scheduler, "step"),