diff --git a/setup.py b/setup.py
index 9d34037319..25e434de28 100644
--- a/setup.py
+++ b/setup.py
@@ -211,7 +211,7 @@ install_requires = [
 
 setup(
     name="diffusers",
-    version="0.5.1",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
+    version="0.6.0.dev0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
     description="Diffusers",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py
index 233a7ade80..0a0b0b4965 100644
--- a/src/diffusers/__init__.py
+++ b/src/diffusers/__init__.py
@@ -9,7 +9,7 @@ from .utils import (
 )
 
 
-__version__ = "0.5.1"
+__version__ = "0.6.0.dev0"
 
 from .configuration_utils import ConfigMixin
 from .onnx_utils import OnnxRuntimeModel
diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py
index 33d9bafb8a..0f1a402294 100644
--- a/src/diffusers/schedulers/scheduling_ddim.py
+++ b/src/diffusers/schedulers/scheduling_ddim.py
@@ -119,15 +119,7 @@ class DDIMScheduler(SchedulerMixin, ConfigMixin):
         clip_sample: bool = True,
         set_alpha_to_one: bool = True,
         steps_offset: int = 0,
-        **kwargs,
     ):
-        deprecate(
-            "tensor_format",
-            "0.6.0",
-            "If you're running your code in PyTorch, you can safely remove this argument.",
-            take_from=kwargs,
-        )
-
         if trained_betas is not None:
             self.betas = torch.from_numpy(trained_betas)
         elif beta_schedule == "linear":
diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py
index 04c92904a6..d51d58ac8f 100644
--- a/src/diffusers/schedulers/scheduling_ddpm.py
+++ b/src/diffusers/schedulers/scheduling_ddpm.py
@@ -22,7 +22,7 @@ import numpy as np
 import torch
 
 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import BaseOutput, deprecate
+from ..utils import BaseOutput
 from .scheduling_utils import SchedulerMixin
 
 
@@ -112,15 +112,7 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin):
         trained_betas: Optional[np.ndarray] = None,
         variance_type: str = "fixed_small",
         clip_sample: bool = True,
-        **kwargs,
     ):
-        deprecate(
-            "tensor_format",
-            "0.6.0",
-            "If you're running your code in PyTorch, you can safely remove this argument.",
-            take_from=kwargs,
-        )
-
         if trained_betas is not None:
             self.betas = torch.from_numpy(trained_betas)
         elif beta_schedule == "linear":
diff --git a/src/diffusers/schedulers/scheduling_karras_ve.py b/src/diffusers/schedulers/scheduling_karras_ve.py
index 3b0ec91ed1..743f2e061c 100644
--- a/src/diffusers/schedulers/scheduling_karras_ve.py
+++ b/src/diffusers/schedulers/scheduling_karras_ve.py
@@ -20,7 +20,7 @@ import numpy as np
 import torch
 
 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import BaseOutput, deprecate
+from ..utils import BaseOutput
 from .scheduling_utils import SchedulerMixin
 
 
@@ -86,15 +86,7 @@ class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
         s_churn: float = 80,
         s_min: float = 0.05,
         s_max: float = 50,
-        **kwargs,
     ):
-        deprecate(
-            "tensor_format",
-            "0.6.0",
-            "If you're running your code in PyTorch, you can safely remove this argument.",
-            take_from=kwargs,
-        )
-
         # standard deviation of the initial noise distribution
         self.init_noise_sigma = sigma_max
 
diff --git a/src/diffusers/schedulers/scheduling_lms_discrete.py b/src/diffusers/schedulers/scheduling_lms_discrete.py
index 12dc473f63..1b8ca7c5df 100644
--- a/src/diffusers/schedulers/scheduling_lms_discrete.py
+++ b/src/diffusers/schedulers/scheduling_lms_discrete.py
@@ -74,15 +74,7 @@ class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
         beta_end: float = 0.02,
         beta_schedule: str = "linear",
         trained_betas: Optional[np.ndarray] = None,
-        **kwargs,
     ):
-        deprecate(
-            "tensor_format",
-            "0.6.0",
-            "If you're running your code in PyTorch, you can safely remove this argument.",
-            take_from=kwargs,
-        )
-
         if trained_betas is not None:
             self.betas = torch.from_numpy(trained_betas)
         elif beta_schedule == "linear":
diff --git a/src/diffusers/schedulers/scheduling_pndm.py b/src/diffusers/schedulers/scheduling_pndm.py
index b26840ea19..99ccc6c66f 100644
--- a/src/diffusers/schedulers/scheduling_pndm.py
+++ b/src/diffusers/schedulers/scheduling_pndm.py
@@ -100,15 +100,7 @@ class PNDMScheduler(SchedulerMixin, ConfigMixin):
         skip_prk_steps: bool = False,
         set_alpha_to_one: bool = False,
         steps_offset: int = 0,
-        **kwargs,
     ):
-        deprecate(
-            "tensor_format",
-            "0.6.0",
-            "If you're running your code in PyTorch, you can safely remove this argument.",
-            take_from=kwargs,
-        )
-
         if trained_betas is not None:
             self.betas = torch.from_numpy(trained_betas)
         elif beta_schedule == "linear":
diff --git a/src/diffusers/schedulers/scheduling_sde_ve.py b/src/diffusers/schedulers/scheduling_sde_ve.py
index 01fe222be9..d31adbc3c6 100644
--- a/src/diffusers/schedulers/scheduling_sde_ve.py
+++ b/src/diffusers/schedulers/scheduling_sde_ve.py
@@ -21,7 +21,7 @@ from typing import Optional, Tuple, Union
 import torch
 
 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import BaseOutput, deprecate
+from ..utils import BaseOutput
 from .scheduling_utils import SchedulerMixin, SchedulerOutput
 
 
@@ -75,15 +75,7 @@ class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
         sigma_max: float = 1348.0,
         sampling_eps: float = 1e-5,
         correct_steps: int = 1,
-        **kwargs,
     ):
-        deprecate(
-            "tensor_format",
-            "0.6.0",
-            "If you're running your code in PyTorch, you can safely remove this argument.",
-            take_from=kwargs,
-        )
-
         # standard deviation of the initial noise distribution
         self.init_noise_sigma = sigma_max
 
diff --git a/src/diffusers/schedulers/scheduling_sde_vp.py b/src/diffusers/schedulers/scheduling_sde_vp.py
index 614e473eb8..a37a159a87 100644
--- a/src/diffusers/schedulers/scheduling_sde_vp.py
+++ b/src/diffusers/schedulers/scheduling_sde_vp.py
@@ -20,7 +20,6 @@ from typing import Union
 import torch
 
 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import deprecate
 from .scheduling_utils import SchedulerMixin
 
 
@@ -40,13 +39,7 @@ class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
     """
 
     @register_to_config
-    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3, **kwargs):
-        deprecate(
-            "tensor_format",
-            "0.6.0",
-            "If you're running your code in PyTorch, you can safely remove this argument.",
-            take_from=kwargs,
-        )
+    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
         self.sigmas = None
         self.discrete_sigmas = None
         self.timesteps = None
diff --git a/src/diffusers/schedulers/scheduling_utils.py b/src/diffusers/schedulers/scheduling_utils.py
index b83bf3b846..29bf982f6a 100644
--- a/src/diffusers/schedulers/scheduling_utils.py
+++ b/src/diffusers/schedulers/scheduling_utils.py
@@ -15,7 +15,7 @@ from dataclasses import dataclass
 
 import torch
 
-from ..utils import BaseOutput, deprecate
+from ..utils import BaseOutput
 
 
 SCHEDULER_CONFIG_NAME = "scheduler_config.json"
@@ -41,12 +41,3 @@ class SchedulerMixin:
     """
 
     config_name = SCHEDULER_CONFIG_NAME
-
-    def set_format(self, tensor_format="pt"):
-        deprecate(
-            "set_format",
-            "0.6.0",
-            "If you're running your code in PyTorch, you can safely remove this function as the schedulers are always"
-            " in Pytorch",
-        )
-        return self
diff --git a/src/diffusers/utils/outputs.py b/src/diffusers/utils/outputs.py
index 10cffeeb0d..5d902dd394 100644
--- a/src/diffusers/utils/outputs.py
+++ b/src/diffusers/utils/outputs.py
@@ -21,7 +21,6 @@ from typing import Any, Tuple
 
 import numpy as np
 
-from .deprecation_utils import deprecate
 from .import_utils import is_torch_available
 
 
@@ -86,9 +85,6 @@ class BaseOutput(OrderedDict):
     def __getitem__(self, k):
         if isinstance(k, str):
             inner_dict = {k: v for (k, v) in self.items()}
-            if self.__class__.__name__ in ["StableDiffusionPipelineOutput", "ImagePipelineOutput"] and k == "sample":
-                deprecate("samples", "0.6.0", "Please use `.images` or `'images'` instead.")
-                return inner_dict["images"]
             return inner_dict[k]
         else:
             return self.to_tuple()[k]
diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index 3b2e2d162e..17df9ca1fc 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -318,14 +318,14 @@ class PipelineFastTests(unittest.TestCase):
         # Warmup pass when using mps (see #372)
         if torch_device == "mps":
             generator = torch.manual_seed(0)
-            _ = ldm([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=1, output_type="numpy")[
-                "sample"
-            ]
+            _ = ldm(
+                [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=1, output_type="numpy"
+            ).images
 
         generator = torch.manual_seed(0)
-        image = ldm([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="numpy")[
-            "sample"
-        ]
+        image = ldm(
+            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="numpy"
+        ).images
 
         generator = torch.manual_seed(0)
         image_from_tuple = ldm(
@@ -1535,9 +1535,9 @@ class PipelineTesterMixin(unittest.TestCase):
         prompt = "A painting of a squirrel eating a burger"
 
         generator = torch.manual_seed(0)
-        image = ldm([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="numpy")[
-            "sample"
-        ]
+        image = ldm(
+            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="numpy"
+        ).images
 
         image_slice = image[0, -3:, -3:, -1]
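
Downstream migration note (not part of the diff): code that still passed the removed `tensor_format` argument to a scheduler, or read pipeline outputs through the `"sample"` key, needs only the small adjustment sketched below. The checkpoint id and scheduler settings are illustrative assumptions, not taken from this PR.

```python
import torch
from diffusers import DDIMPipeline, DDIMScheduler

# Schedulers no longer accept the deprecated `tensor_format` kwarg; drop it.
# Before: DDIMScheduler(num_train_timesteps=1000, tensor_format="pt")
scheduler = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear")

# Illustrative checkpoint id; any image pipeline behaves the same way.
pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")

generator = torch.manual_seed(0)
# Before: pipe(generator=generator, num_inference_steps=2, output_type="numpy")["sample"]
# After: read the generated images from the `.images` attribute of the output.
images = pipe(generator=generator, num_inference_steps=2, output_type="numpy").images
```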