Mirror of https://github.com/huggingface/diffusers.git, synced 2026-01-27 17:22:53 +03:00
fix E721 Do not compare types, use isinstance() (#4992)
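Flake8's E721 flags exact type comparisons such as type(x) == float because isinstance() is the idiomatic check and, unlike an exact comparison, also accepts subclasses. A minimal sketch of the behavioral difference (the Celsius class is illustrative, not from the repository); for callers passing plain built-in values the two forms agree, so the hunks below are lint fixes rather than behavior changes:

class Celsius(float):
    """A float subclass used only to illustrate the difference."""


temp = Celsius(21.5)

print(type(temp) == float)      # False: the exact type is Celsius
print(isinstance(temp, float))  # True: Celsius is a subclass of float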
@@ -1138,7 +1138,7 @@ class SDXLLongPromptWeightingPipeline(DiffusionPipeline, FromSingleFileMixin, Lo
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

         # 7.1 Apply denoising_end
-        if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
+        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
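The guarded block, truncated in the hunk above, turns the fractional denoising_end into a discrete timestep cutoff and trims the schedule. A self-contained sketch of that arithmetic, with the scheduler state replaced by plain values (the list comprehension stands in for the pipeline's own filtering):

num_train_timesteps = 1000             # stands in for scheduler.config.num_train_timesteps
timesteps = list(range(999, -1, -20))  # stands in for scheduler.timesteps

denoising_end = 0.8
if denoising_end is not None and isinstance(denoising_end, float) and 0 < denoising_end < 1:
    # Keep only the first 80% of the denoising trajectory.
    discrete_timestep_cutoff = int(round(num_train_timesteps - denoising_end * num_train_timesteps))
    timesteps = [t for t in timesteps if t >= discrete_timestep_cutoff]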
@@ -701,7 +701,7 @@ class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline):
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

         # 10.1 Apply denoising_end
-        if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
+        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
@@ -76,7 +76,7 @@ class ValueGuidedRLPipeline(DiffusionPipeline):
         return x_in * self.stds[key] + self.means[key]

     def to_torch(self, x_in):
-        if type(x_in) is dict:
+        if isinstance(x_in, dict):
             return {k: self.to_torch(v) for k, v in x_in.items()}
         elif torch.is_tensor(x_in):
             return x_in.to(self.unet.device)
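Here the original check was type(x_in) is dict, which the commit also rewrites: isinstance keeps the recursion working for dict subclasses such as collections.OrderedDict. A standalone sketch of the same pattern (the free-standing function and the "cpu" default are assumptions for the example):

import collections

import torch


def to_torch(x_in, device="cpu"):
    # Recurse into dicts (and dict subclasses), move tensors, wrap everything else.
    if isinstance(x_in, dict):
        return {k: to_torch(v, device) for k, v in x_in.items()}
    elif torch.is_tensor(x_in):
        return x_in.to(device)
    return torch.tensor(x_in, device=device)


batch = collections.OrderedDict(obs=[[0.1, 0.2]], act=torch.zeros(2))
print(to_torch(batch))  # the OrderedDict now passes the isinstance check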
@@ -178,7 +178,7 @@ class AudioDiffusionPipeline(DiffusionPipeline):
         self.scheduler.set_timesteps(steps)
         step_generator = step_generator or generator
         # For backwards compatibility
-        if type(self.unet.config.sample_size) == int:
+        if isinstance(self.unet.config.sample_size, int):
             self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
         if noise is None:
             noise = randn_tensor(
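The backwards-compatibility shim above widens a scalar sample_size into a (height, width) pair so downstream code can always index two dimensions. A minimal sketch of that normalization as a free function (the function name is hypothetical):

def normalize_sample_size(sample_size):
    # Older configs stored a single int; newer ones store (height, width).
    if isinstance(sample_size, int):
        sample_size = (sample_size, sample_size)
    return sample_size


print(normalize_sample_size(64))        # (64, 64)
print(normalize_sample_size((64, 32)))  # (64, 32), unchanged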
@@ -810,7 +810,7 @@ class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoad
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

         # 7.1 Apply denoising_end
-        if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
+        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
@@ -885,7 +885,7 @@ class StableDiffusionXLImg2ImgPipeline(

         # 5. Prepare timesteps
         def denoising_value_valid(dnv):
-            return type(denoising_end) == float and 0 < dnv < 1
+            return isinstance(denoising_end, float) and 0 < dnv < 1

         self.scheduler.set_timesteps(num_inference_steps, device=device)
         timesteps, num_inference_steps = self.get_timesteps(
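Note that the closure, both before and after the fix, type-checks the enclosing denoising_end while range-checking its argument dnv; the surrounding pipeline applies it to both denoising_start and denoising_end. A standalone sketch of how the validator behaves (values are illustrative):

denoising_start = 0.2
denoising_end = 0.8


def denoising_value_valid(dnv):
    # Mirrors the source: the type check reads the enclosing denoising_end.
    return isinstance(denoising_end, float) and 0 < dnv < 1


print(denoising_value_valid(denoising_start))  # True
print(denoising_value_valid(denoising_end))    # True
print(denoising_value_valid(1.5))              # False: outside (0, 1)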
@@ -1120,7 +1120,7 @@ class StableDiffusionXLInpaintPipeline(

         # 4. set timesteps
         def denoising_value_valid(dnv):
-            return type(denoising_end) == float and 0 < dnv < 1
+            return isinstance(denoising_end, float) and 0 < dnv < 1

         self.scheduler.set_timesteps(num_inference_steps, device=device)
         timesteps, num_inference_steps = self.get_timesteps(
@@ -837,7 +837,7 @@ class StableDiffusionXLInstructPix2PixPipeline(

         # 11. Denoising loop
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
-        if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
+        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
@@ -886,7 +886,7 @@ class StableDiffusionXLAdapterPipeline(
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

         # 7.1 Apply denoising_end
-        if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
+        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
@@ -193,7 +193,7 @@ class ConsistencyModelPipelineSlowTests(unittest.TestCase):
         return inputs

     def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
-        if type(device) == str:
+        if isinstance(device, str):
             device = torch.device(device)
         generator = torch.Generator(device=device).manual_seed(seed)
         latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
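torch.Generator accepts a device string directly, so the normalization here is about keeping a single torch.device object for reuse across the test helpers. A minimal sketch (make_fixed_latents is a hypothetical condensation of the test method, and torch.randn stands in for diffusers' randn_tensor helper):

import torch


def make_fixed_latents(device="cpu", seed=0, shape=(1, 3, 64, 64), dtype=torch.float32):
    # Accept "cpu"/"cuda:0" strings as well as torch.device objects.
    if isinstance(device, str):
        device = torch.device(device)
    generator = torch.Generator(device=device).manual_seed(seed)
    return torch.randn(shape, generator=generator, device=device, dtype=dtype)


latents = make_fixed_latents()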
@@ -109,7 +109,7 @@ class UniDiffuserPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         return inputs

     def get_fixed_latents(self, device, seed=0):
-        if type(device) == str:
+        if isinstance(device, str):
             device = torch.device(device)
         generator = torch.Generator(device=device).manual_seed(seed)
         # Hardcode the shapes for now.
@@ -545,7 +545,7 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):
         return inputs

     def get_fixed_latents(self, device, seed=0):
-        if type(device) == str:
+        if isinstance(device, str):
             device = torch.device(device)
         latent_device = torch.device("cpu")
         generator = torch.Generator(device=latent_device).manual_seed(seed)
@@ -648,7 +648,7 @@ class UniDiffuserPipelineNightlyTests(unittest.TestCase):
         return inputs

     def get_fixed_latents(self, device, seed=0):
-        if type(device) == str:
+        if isinstance(device, str):
             device = torch.device(device)
         latent_device = torch.device("cpu")
         generator = torch.Generator(device=latent_device).manual_seed(seed)