diff --git a/docs/source/api/pipelines/overview.mdx b/docs/source/api/pipelines/overview.mdx
index 1c570e887b..fa29683513 100644
--- a/docs/source/api/pipelines/overview.mdx
+++ b/docs/source/api/pipelines/overview.mdx
@@ -139,9 +139,9 @@ from diffusers import StableDiffusionImg2ImgPipeline
 
 # load the pipeline
 device = "cuda"
-pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", revision="fp16", torch_dtype=torch.float16
-).to(device)
+pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to(
+    device
+)
 
 # let's download an initial image
 url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
@@ -189,7 +189,6 @@ mask_image = download_image(mask_url).resize((512, 512))
 
 pipe = StableDiffusionInpaintPipeline.from_pretrained(
     "runwayml/stable-diffusion-inpainting",
-    revision="fp16",
     torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
diff --git a/docs/source/api/pipelines/stable_diffusion_2.mdx b/docs/source/api/pipelines/stable_diffusion_2.mdx
index baf40be128..3dce6da4f4 100644
--- a/docs/source/api/pipelines/stable_diffusion_2.mdx
+++ b/docs/source/api/pipelines/stable_diffusion_2.mdx
@@ -113,7 +113,7 @@ import torch
 
 # load model and scheduler
 model_id = "stabilityai/stable-diffusion-x4-upscaler"
-pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
+pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
 pipeline = pipeline.to("cuda")
 
 # let's download an image
diff --git a/docs/source/optimization/fp16.mdx b/docs/source/optimization/fp16.mdx
index 551805310b..e0c3d99e84 100644
--- a/docs/source/optimization/fp16.mdx
+++ b/docs/source/optimization/fp16.mdx
@@ -79,7 +79,7 @@ To save more GPU memory and get even more speed, you can load and run the model
 ```Python
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
+    torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
 
@@ -107,7 +107,7 @@ from diffusers import StableDiffusionPipeline
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
+    torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
 
@@ -134,7 +134,7 @@ from diffusers import StableDiffusionPipeline
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
+    torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
 
@@ -159,7 +159,7 @@ from diffusers import StableDiffusionPipeline
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
+    torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
 
@@ -179,7 +179,7 @@ from diffusers import StableDiffusionPipeline
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
+    torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
 
@@ -234,7 +234,6 @@ def generate_inputs():
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
     torch_dtype=torch.float16,
 ).to("cuda")
 unet = pipe.unet
@@ -298,7 +297,6 @@ class UNet2DConditionOutput:
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
     torch_dtype=torch.float16,
 ).to("cuda")
 
@@ -349,7 +347,6 @@ import torch
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
     torch_dtype=torch.float16,
).to("cuda") diff --git a/docs/source/using-diffusers/custom_pipeline_examples.mdx b/docs/source/using-diffusers/custom_pipeline_examples.mdx index b51af5db91..92132b228f 100644 --- a/docs/source/using-diffusers/custom_pipeline_examples.mdx +++ b/docs/source/using-diffusers/custom_pipeline_examples.mdx @@ -58,7 +58,6 @@ guided_pipeline = DiffusionPipeline.from_pretrained( custom_pipeline="clip_guided_stable_diffusion", clip_model=clip_model, feature_extractor=feature_extractor, - revision="fp16", torch_dtype=torch.float16, ) guided_pipeline.enable_attention_slicing() @@ -113,7 +112,6 @@ import torch pipe = DiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", - revision="fp16", torch_dtype=torch.float16, safety_checker=None, # Very important for videos...lots of false positives while interpolating custom_pipeline="interpolate_stable_diffusion", @@ -159,7 +157,6 @@ pipe = DiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_mega", torch_dtype=torch.float16, - revision="fp16", ) pipe.to("cuda") pipe.enable_attention_slicing() @@ -204,7 +201,7 @@ from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained( - "hakurei/waifu-diffusion", custom_pipeline="lpw_stable_diffusion", revision="fp16", torch_dtype=torch.float16 + "hakurei/waifu-diffusion", custom_pipeline="lpw_stable_diffusion", torch_dtype=torch.float16 ) pipe = pipe.to("cuda") @@ -268,7 +265,7 @@ diffuser_pipeline = DiffusionPipeline.from_pretrained( custom_pipeline="speech_to_image_diffusion", speech_model=model, speech_processor=processor, - revision="fp16", + torch_dtype=torch.float16, ) diff --git a/docs/source/using-diffusers/img2img.mdx b/docs/source/using-diffusers/img2img.mdx index ecd9d73da6..c64d677686 100644 --- a/docs/source/using-diffusers/img2img.mdx +++ b/docs/source/using-diffusers/img2img.mdx @@ -24,9 +24,9 @@ from diffusers import StableDiffusionImg2ImgPipeline # load the pipeline device = "cuda" -pipe = StableDiffusionImg2ImgPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", revision="fp16", torch_dtype=torch.float16 -).to(device) +pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to( + device +) # let's download an initial image url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" diff --git a/docs/source/using-diffusers/inpaint.mdx b/docs/source/using-diffusers/inpaint.mdx index 1bafa24455..effdae0b61 100644 --- a/docs/source/using-diffusers/inpaint.mdx +++ b/docs/source/using-diffusers/inpaint.mdx @@ -42,7 +42,6 @@ mask_image = download_image(mask_url).resize((512, 512)) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", - revision="fp16", torch_dtype=torch.float16, ) pipe = pipe.to("cuda") diff --git a/examples/community/README.md b/examples/community/README.md index d95aff7d8a..3f4d1e10f2 100644 --- a/examples/community/README.md +++ b/examples/community/README.md @@ -57,7 +57,7 @@ guided_pipeline = DiffusionPipeline.from_pretrained( custom_pipeline="clip_guided_stable_diffusion", clip_model=clip_model, feature_extractor=feature_extractor, - revision="fp16", + torch_dtype=torch.float16, ) guided_pipeline.enable_attention_slicing() @@ -208,7 +208,7 @@ import torch pipe = DiffusionPipeline.from_pretrained( 'hakurei/waifu-diffusion', custom_pipeline="lpw_stable_diffusion", - revision="fp16", + torch_dtype=torch.float16 ) 
pipe=pipe.to("cuda") @@ -275,7 +275,7 @@ diffuser_pipeline = DiffusionPipeline.from_pretrained( custom_pipeline="speech_to_image_diffusion", speech_model=model, speech_processor=processor, - revision="fp16", + torch_dtype=torch.float16, ) @@ -333,7 +333,7 @@ import torch pipe = DiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", custom_pipeline="wildcard_stable_diffusion", - revision="fp16", + torch_dtype=torch.float16, ) prompt = "__animal__ sitting on a __object__ wearing a __clothing__" @@ -567,7 +567,7 @@ diffuser_pipeline = DiffusionPipeline.from_pretrained( detection_pipeline=language_detection_pipeline, translation_model=trans_model, translation_tokenizer=trans_tokenizer, - revision="fp16", + torch_dtype=torch.float16, ) @@ -615,7 +615,7 @@ mask_image = PIL.Image.open(mask_path).convert("RGB").resize((512, 512)) pipe = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", custom_pipeline="img2img_inpainting", - revision="fp16", + torch_dtype=torch.float16 ) pipe = pipe.to("cuda") diff --git a/examples/community/wildcard_stable_diffusion.py b/examples/community/wildcard_stable_diffusion.py index 282be8e48b..ee45e62a23 100644 --- a/examples/community/wildcard_stable_diffusion.py +++ b/examples/community/wildcard_stable_diffusion.py @@ -68,7 +68,7 @@ class WildcardStableDiffusionPipeline(DiffusionPipeline): Example Usage: pipe = WildcardStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", - revision="fp16", + torch_dtype=torch.float16, ) prompt = "__animal__ sitting on a __object__ wearing a __clothing__" diff --git a/src/diffusers/pipelines/README.md b/src/diffusers/pipelines/README.md index c3202db027..07f5601ee9 100644 --- a/src/diffusers/pipelines/README.md +++ b/src/diffusers/pipelines/README.md @@ -113,7 +113,6 @@ from diffusers import StableDiffusionImg2ImgPipeline device = "cuda" pipe = StableDiffusionImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", - revision="fp16", torch_dtype=torch.float16, ).to(device) @@ -161,7 +160,6 @@ mask_image = download_image(mask_url).resize((512, 512)) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", - revision="fp16", torch_dtype=torch.float16, ) pipe = pipe.to("cuda") diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion.py b/tests/pipelines/altdiffusion/test_alt_diffusion.py index 99406d85c1..1d1d47bf50 100644 --- a/tests/pipelines/altdiffusion/test_alt_diffusion.py +++ b/tests/pipelines/altdiffusion/test_alt_diffusion.py @@ -248,9 +248,7 @@ class AltDiffusionPipelineIntegrationTests(unittest.TestCase): def test_alt_diffusion_text2img_pipeline_fp16(self): torch.cuda.reset_peak_memory_stats() model_id = "BAAI/AltDiffusion" - pipe = AltDiffusionPipeline.from_pretrained( - model_id, revision="fp16", torch_dtype=torch.float16, safety_checker=None - ) + pipe = AltDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py index a8c0828881..3c2c3fcb13 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -527,9 +527,7 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase): def test_stable_diffusion_attention_slicing(self): torch.cuda.reset_peak_memory_stats() - pipe = 
-            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
-        )
+        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
 
@@ -555,9 +553,7 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
 
     def test_stable_diffusion_vae_slicing(self):
         torch.cuda.reset_peak_memory_stats()
-        pipe = StableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
-        )
+        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing()
@@ -588,9 +584,7 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
         assert np.abs(image_sliced - image).max() < 4e-3
 
     def test_stable_diffusion_fp16_vs_autocast(self):
-        pipe = StableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
-        )
+        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
 
@@ -629,9 +623,7 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
 
         callback_fn.has_been_called = False
 
-        pipe = StableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
-        )
+        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing()
@@ -645,16 +637,12 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
         pipeline_id = "CompVis/stable-diffusion-v1-4"
 
         start_time = time.time()
-        pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(
-            pipeline_id, revision="fp16", torch_dtype=torch.float16
-        )
+        pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16)
         pipeline_low_cpu_mem_usage.to(torch_device)
         low_cpu_mem_usage_time = time.time() - start_time
 
         start_time = time.time()
-        _ = StableDiffusionPipeline.from_pretrained(
-            pipeline_id, revision="fp16", torch_dtype=torch.float16, low_cpu_mem_usage=False
-        )
+        _ = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16, low_cpu_mem_usage=False)
         normal_load_time = time.time() - start_time
 
         assert 2 * low_cpu_mem_usage_time < normal_load_time
@@ -664,9 +652,7 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
         torch.cuda.reset_max_memory_allocated()
         torch.cuda.reset_peak_memory_stats()
 
-        pipe = StableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
-        )
+        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
index f2949cb3ef..53df7f8571 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
@@ -303,7 +303,7 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
         callback_fn.has_been_called = False
 
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", safety_checker=None, revision="fp16", torch_dtype=torch.float16
+            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
         )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -320,7 +320,7 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
         torch.cuda.reset_peak_memory_stats()
 
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", safety_checker=None, revision="fp16", torch_dtype=torch.float16
+            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
         )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
index 340b7b71f0..bd4c036934 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
@@ -212,7 +212,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
 
     def test_stable_diffusion_inpaint_fp16(self):
         pipe = StableDiffusionInpaintPipeline.from_pretrained(
-            "runwayml/stable-diffusion-inpainting", revision="fp16", torch_dtype=torch.float16, safety_checker=None
+            "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -266,7 +266,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
         torch.cuda.reset_peak_memory_stats()
 
         pipe = StableDiffusionInpaintPipeline.from_pretrained(
-            "runwayml/stable-diffusion-inpainting", safety_checker=None, revision="fp16", torch_dtype=torch.float16
+            "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16
         )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py
index 873252054b..0b63ab6dc8 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py
@@ -425,7 +425,7 @@ class StableDiffusionInpaintLegacyPipelineSlowTests(unittest.TestCase):
         callback_fn.has_been_called = False
 
         pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", safety_checker=None, revision="fp16", torch_dtype=torch.float16
+            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
         )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
index 328e0a2d3c..14571f0fe4 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
@@ -304,7 +304,7 @@ class StableDiffusion2PipelineSlowTests(unittest.TestCase):
     def test_stable_diffusion_attention_slicing(self):
         torch.cuda.reset_peak_memory_stats()
         pipe = StableDiffusionPipeline.from_pretrained(
-            "stabilityai/stable-diffusion-2-base", revision="fp16", torch_dtype=torch.float16
+            "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
        )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -352,7 +352,7 @@ class StableDiffusion2PipelineSlowTests(unittest.TestCase):
         callback_fn.has_been_called = False
 
         pipe = StableDiffusionPipeline.from_pretrained(
-            "stabilityai/stable-diffusion-2-base", revision="fp16", torch_dtype=torch.float16
+            "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
         )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -369,7 +369,7 @@ class StableDiffusion2PipelineSlowTests(unittest.TestCase):
         torch.cuda.reset_peak_memory_stats()
 
         pipe = StableDiffusionPipeline.from_pretrained(
-            "stabilityai/stable-diffusion-2-base", revision="fp16", torch_dtype=torch.float16
+            "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
         )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
index d37bd3f944..184d91c487 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
@@ -484,7 +484,7 @@ class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
         callback_fn.has_been_called = False
 
         pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
-            "stabilityai/stable-diffusion-2-depth", safety_checker=None, revision="fp16", torch_dtype=torch.float16
+            "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16
         )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -501,7 +501,7 @@ class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
         torch.cuda.reset_peak_memory_stats()
 
         pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
-            "stabilityai/stable-diffusion-2-depth", safety_checker=None, revision="fp16", torch_dtype=torch.float16
+            "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16
         )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
index eb1bf3fe59..29e14264c0 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
@@ -188,7 +188,6 @@ class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
         model_id = "stabilityai/stable-diffusion-2-inpainting"
         pipe = StableDiffusionInpaintPipeline.from_pretrained(
             model_id,
-            revision="fp16",
             torch_dtype=torch.float16,
             safety_checker=None,
         )
@@ -231,7 +230,6 @@ class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
             safety_checker=None,
             scheduler=pndm,
             device_map="auto",
-            revision="fp16",
             torch_dtype=torch.float16,
         )
         pipe.to(torch_device)
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
index 0eb43b570b..7db23808ae 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
@@ -306,7 +306,6 @@ class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
         model_id = "stabilityai/stable-diffusion-x4-upscaler"
         pipe = StableDiffusionUpscalePipeline.from_pretrained(
             model_id,
-            revision="fp16",
             torch_dtype=torch.float16,
         )
         pipe.to(torch_device)
@@ -340,7 +339,6 @@ class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
         model_id = "stabilityai/stable-diffusion-x4-upscaler"
"stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionUpscalePipeline.from_pretrained( model_id, - revision="fp16", torch_dtype=torch.float16, ) pipe.to(torch_device) diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py index 9755bf3b18..3f96480d2e 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py @@ -329,7 +329,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase): def test_stable_diffusion_attention_slicing_v_pred(self): torch.cuda.reset_peak_memory_stats() model_id = "stabilityai/stable-diffusion-2" - pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16) + pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) @@ -389,9 +389,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase): "sd2-text2img/astronaut_riding_a_horse_v_pred_fp16.npy" ) - pipe = StableDiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-2", revision="fp16", torch_dtype=torch.float16 - ) + pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) @@ -430,9 +428,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase): test_callback_fn.has_been_called = False - pipe = StableDiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-2", revision="fp16", torch_dtype=torch.float16 - ) + pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() @@ -456,16 +452,12 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase): pipeline_id = "stabilityai/stable-diffusion-2" start_time = time.time() - pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained( - pipeline_id, revision="fp16", torch_dtype=torch.float16 - ) + pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) pipeline_low_cpu_mem_usage.to(torch_device) low_cpu_mem_usage_time = time.time() - start_time start_time = time.time() - _ = StableDiffusionPipeline.from_pretrained( - pipeline_id, revision="fp16", torch_dtype=torch.float16, low_cpu_mem_usage=False - ) + _ = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16, low_cpu_mem_usage=False) normal_load_time = time.time() - start_time assert 2 * low_cpu_mem_usage_time < normal_load_time @@ -478,7 +470,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase): pipeline_id = "stabilityai/stable-diffusion-2" prompt = "Andromeda galaxy in a bottle" - pipeline = StableDiffusionPipeline.from_pretrained(pipeline_id, revision="fp16", torch_dtype=torch.float16) + pipeline = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) pipeline = pipeline.to(torch_device) pipeline.enable_attention_slicing(1) pipeline.enable_sequential_cpu_offload() diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py index d3fff485f7..ff02ee8ea4 100644 --- a/tests/test_pipelines.py +++ b/tests/test_pipelines.py @@ -286,7 +286,6 @@ class 
             clip_model=clip_model,
             feature_extractor=feature_extractor,
             torch_dtype=torch.float16,
-            revision="fp16",
         )
         pipeline.enable_attention_slicing()
         pipeline = pipeline.to(torch_device)
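
For reference, the loading pattern these changes converge on is sketched below. This is a minimal illustration, not part of the patch: it assumes the `runwayml/stable-diffusion-v1-5` checkpoint used elsewhere in the diff and an arbitrary example prompt, and selects half-precision weights with `torch_dtype=torch.float16` alone, without a separate `revision="fp16"` argument.

```python
import torch
from diffusers import StableDiffusionPipeline

# Half precision is requested via torch_dtype only; no fp16 branch/revision is passed.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # example checkpoint, as used throughout the patch
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

# Example prompt (illustrative only); the pipeline output exposes generated PIL images via `.images`.
image = pipe("a photo of an astronaut riding a horse").images[0]
```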