
[CI] Nightly Test Updates (#9380)

* update

* update

* update

* update

* update

---------

Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
Co-authored-by: YiYi Xu <yixu310@gmail.com>
This commit is contained in:
Dhruv Nair
2024-09-12 20:21:28 +05:30
committed by GitHub
parent 6cf8d98ce1
commit 1e8cf2763d
36 changed files with 260 additions and 893 deletions
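Most of the diff below is one mechanical substitution: every test that loaded the runwayml/stable-diffusion-v1-5 or runwayml/stable-diffusion-inpainting checkpoints now loads the community mirrors Jiali/stable-diffusion-1.5 and botp/stable-diffusion-v1-5-inpainting instead, presumably because the original runwayml repositories are no longer available on the Hub (the commit message itself gives no rationale). A minimal sketch of the pattern, assuming the mirrors host the same weights as the originals:

import torch
from diffusers import StableDiffusionPipeline

# was: "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)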


@@ -20,7 +20,6 @@ import numpy as np
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
-from huggingface_hub.repocard import RepoCard
from safetensors.torch import load_file
from transformers import CLIPTextModel, CLIPTokenizer
@@ -103,7 +102,7 @@ class StableDiffusionLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
@slow
@require_torch_gpu
def test_integration_move_lora_cpu(self):
path = "runwayml/stable-diffusion-v1-5"
path = "Jiali/stable-diffusion-1.5"
lora_id = "takuma104/lora-test-text-encoder-lora-target"
pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
@@ -162,7 +161,7 @@ class StableDiffusionLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
def test_integration_move_lora_dora_cpu(self):
from peft import LoraConfig
path = "Lykon/dreamshaper-8"
path = "Jiali/stable-diffusion-1.5"
unet_lora_config = LoraConfig(
init_lora_weights="gaussian",
target_modules=["to_k", "to_q", "to_v", "to_out.0"],
@@ -222,7 +221,7 @@ class LoraIntegrationTests(unittest.TestCase):
torch.cuda.empty_cache()
def test_integration_logits_with_scale(self):
path = "runwayml/stable-diffusion-v1-5"
path = "Jiali/stable-diffusion-1.5"
lora_id = "takuma104/lora-test-text-encoder-lora-target"
pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)
@@ -254,7 +253,7 @@ class LoraIntegrationTests(unittest.TestCase):
release_memory(pipe)
def test_integration_logits_no_scale(self):
path = "runwayml/stable-diffusion-v1-5"
path = "Jiali/stable-diffusion-1.5"
lora_id = "takuma104/lora-test-text-encoder-lora-target"
pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)
@@ -284,8 +283,8 @@ class LoraIntegrationTests(unittest.TestCase):
generator = torch.Generator("cpu").manual_seed(0)
lora_model_id = "hf-internal-testing/lora_dreambooth_dog_example"
-card = RepoCard.load(lora_model_id)
-base_model_id = card.data.to_dict()["base_model"]
+base_model_id = "Jiali/stable-diffusion-1.5"
pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
pipe = pipe.to(torch_device)
@@ -308,8 +307,8 @@ class LoraIntegrationTests(unittest.TestCase):
generator = torch.Generator().manual_seed(0)
lora_model_id = "hf-internal-testing/lora-trained"
-card = RepoCard.load(lora_model_id)
-base_model_id = card.data.to_dict()["base_model"]
+base_model_id = "Jiali/stable-diffusion-1.5"
pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
pipe = pipe.to(torch_device)
@@ -420,7 +419,7 @@ class LoraIntegrationTests(unittest.TestCase):
def test_kohya_sd_v15_with_higher_dimensions(self):
generator = torch.Generator().manual_seed(0)
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
torch_device
)
lora_model_id = "hf-internal-testing/urushisato-lora"
@@ -444,8 +443,8 @@ class LoraIntegrationTests(unittest.TestCase):
generator = torch.Generator().manual_seed(0)
lora_model_id = "hf-internal-testing/sd-model-finetuned-lora-t4"
-card = RepoCard.load(lora_model_id)
-base_model_id = card.data.to_dict()["base_model"]
+base_model_id = "Jiali/stable-diffusion-1.5"
pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
pipe = pipe.to(torch_device)
@@ -468,7 +467,7 @@ class LoraIntegrationTests(unittest.TestCase):
prompt = "masterpiece, best quality, mountain"
num_inference_steps = 2
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
torch_device
)
initial_images = pipe(
@@ -506,7 +505,7 @@ class LoraIntegrationTests(unittest.TestCase):
prompt = "masterpiece, best quality, mountain"
num_inference_steps = 2
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
torch_device
)
initial_images = pipe(
@@ -548,9 +547,9 @@ class LoraIntegrationTests(unittest.TestCase):
def test_not_empty_state_dict(self):
# Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
-pipe = AutoPipelineForText2Image.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
-).to(torch_device)
+pipe = AutoPipelineForText2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16).to(
+    torch_device
+)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")
@@ -562,9 +561,9 @@ class LoraIntegrationTests(unittest.TestCase):
def test_load_unload_load_state_dict(self):
# Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
-pipe = AutoPipelineForText2Image.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
-).to(torch_device)
+pipe = AutoPipelineForText2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16).to(
+    torch_device
+)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")
@@ -581,7 +580,7 @@ class LoraIntegrationTests(unittest.TestCase):
release_memory(pipe)
def test_sdv1_5_lcm_lora(self):
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
@@ -609,7 +608,7 @@ class LoraIntegrationTests(unittest.TestCase):
release_memory(pipe)
def test_sdv1_5_lcm_lora_img2img(self):
pipe = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe = AutoPipelineForImage2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
@@ -650,7 +649,7 @@ class LoraIntegrationTests(unittest.TestCase):
This test simply checks that loading a LoRA with an empty network alpha works fine
See: https://github.com/huggingface/diffusers/issues/5606
"""
pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipeline = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
pipeline.enable_sequential_cpu_offload()
civitai_path = hf_hub_download("ybelkada/test-ahi-civitai", "ahi_lora_weights.safetensors")
pipeline.load_lora_weights(civitai_path, adapter_name="ahri")
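The hunks above also drop the RepoCard lookup: these LoRA tests used to resolve the base model from the LoRA repo's model card, whose base_model field presumably still points at the removed runwayml repo, so the base model ID is now hardcoded. The removed pattern, for reference (RepoCard comes from huggingface_hub):

from huggingface_hub.repocard import RepoCard

card = RepoCard.load("hf-internal-testing/lora-trained")
base_model_id = card.data.to_dict()["base_model"]  # now hardcoded to "Jiali/stable-diffusion-1.5"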


@@ -1051,7 +1051,7 @@ class ConsistencyDecoderVAEIntegrationTests(unittest.TestCase):
def test_sd(self):
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", vae=vae, safety_checker=None)
pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", vae=vae, safety_checker=None)
pipe.to(torch_device)
out = pipe(
@@ -1099,7 +1099,7 @@ class ConsistencyDecoderVAEIntegrationTests(unittest.TestCase):
"openai/consistency-decoder", torch_dtype=torch.float16
) # TODO - update
pipe = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
"Jiali/stable-diffusion-1.5",
torch_dtype=torch.float16,
vae=vae,
safety_checker=None,
@@ -1124,7 +1124,7 @@ class ConsistencyDecoderVAEIntegrationTests(unittest.TestCase):
def test_vae_tiling(self):
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", vae=vae, safety_checker=None, torch_dtype=torch.float16
"Jiali/stable-diffusion-1.5", vae=vae, safety_checker=None, torch_dtype=torch.float16
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)


@@ -1376,7 +1376,7 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
@require_torch_accelerator
@skip_mps
def test_compvis_sd_v1_5(self, seed, timestep, expected_slice):
model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5")
model = self.get_unet_model(model_id="Jiali/stable-diffusion-1.5")
latents = self.get_latents(seed)
encoder_hidden_states = self.get_encoder_hidden_states(seed)
@@ -1404,7 +1404,7 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
)
@require_torch_accelerator_with_fp16
def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice):
model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5", fp16=True)
model = self.get_unet_model(model_id="Jiali/stable-diffusion-1.5", fp16=True)
latents = self.get_latents(seed, fp16=True)
encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
@@ -1433,7 +1433,7 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
@require_torch_accelerator
@skip_mps
def test_compvis_sd_inpaint(self, seed, timestep, expected_slice):
model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting")
model = self.get_unet_model(model_id="botp/stable-diffusion-v1-5-inpainting")
latents = self.get_latents(seed, shape=(4, 9, 64, 64))
encoder_hidden_states = self.get_encoder_hidden_states(seed)
@@ -1461,7 +1461,7 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
)
@require_torch_accelerator_with_fp16
def test_compvis_sd_inpaint_fp16(self, seed, timestep, expected_slice):
model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting", fp16=True)
model = self.get_unet_model(model_id="botp/stable-diffusion-v1-5-inpainting", fp16=True)
latents = self.get_latents(seed, shape=(4, 9, 64, 64), fp16=True)
encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)


@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
@@ -21,7 +20,12 @@ import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import AmusedPipeline, AmusedScheduler, UVit2DModel, VQModel
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
@@ -65,9 +69,7 @@ class AmusedPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
vqvae = VQModel(
act_fn="silu",
block_out_channels=[8],
-down_block_types=[
-    "DownEncoderBlock2D",
-],
+down_block_types=["DownEncoderBlock2D"],
in_channels=3,
latent_channels=8,
layers_per_block=1,
@@ -75,9 +77,7 @@ class AmusedPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
num_vq_embeddings=8,
out_channels=3,
sample_size=8,
-up_block_types=[
-    "UpDecoderBlock2D",
-],
+up_block_types=["UpDecoderBlock2D"],
mid_block_add_attention=False,
lookup_from_codebook=True,
)
@@ -96,7 +96,6 @@ class AmusedPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
)
text_encoder = CLIPTextModelWithProjection(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"transformer": transformer,
"scheduler": scheduler,
@@ -135,47 +134,37 @@ class AmusedPipelineSlowTests(unittest.TestCase):
def test_amused_256(self):
pipe = AmusedPipeline.from_pretrained("amused/amused-256")
pipe.to(torch_device)
image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
-expected_slice = np.array([0.4011, 0.3992, 0.3790, 0.3856, 0.3772, 0.3711, 0.3919, 0.3850, 0.3625])
-assert np.abs(image_slice - expected_slice).max() < 3e-3
+expected_slice = np.array([0.4011, 0.3992, 0.379, 0.3856, 0.3772, 0.3711, 0.3919, 0.385, 0.3625])
+assert np.abs(image_slice - expected_slice).max() < 0.003
def test_amused_256_fp16(self):
pipe = AmusedPipeline.from_pretrained("amused/amused-256", variant="fp16", torch_dtype=torch.float16)
pipe.to(torch_device)
image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
expected_slice = np.array([0.0554, 0.05129, 0.0344, 0.0452, 0.0476, 0.0271, 0.0495, 0.0527, 0.0158])
-assert np.abs(image_slice - expected_slice).max() < 7e-3
+assert np.abs(image_slice - expected_slice).max() < 0.007
def test_amused_512(self):
pipe = AmusedPipeline.from_pretrained("amused/amused-512")
pipe.to(torch_device)
image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
-expected_slice = np.array([0.9960, 0.9960, 0.9946, 0.9980, 0.9947, 0.9932, 0.9960, 0.9961, 0.9947])
-assert np.abs(image_slice - expected_slice).max() < 3e-3
+expected_slice = np.array([0.1199, 0.1171, 0.1229, 0.1188, 0.1210, 0.1147, 0.1260, 0.1346, 0.1152])
+assert np.abs(image_slice - expected_slice).max() < 0.003
def test_amused_512_fp16(self):
pipe = AmusedPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16)
pipe.to(torch_device)
image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
-expected_slice = np.array([0.9983, 1.0, 1.0, 1.0, 1.0, 0.9989, 0.9994, 0.9976, 0.9977])
-assert np.abs(image_slice - expected_slice).max() < 3e-3
+expected_slice = np.array([0.1509, 0.1492, 0.1531, 0.1485, 0.1501, 0.1465, 0.1581, 0.1690, 0.1499])
+assert np.abs(image_slice - expected_slice).max() < 0.003
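Note that the expected slices for the two 512px tests changed wholesale rather than just being reformatted, which suggests the reference outputs were regenerated, while the tolerances were only rewritten from scientific to decimal notation. For context, the assertion idiom compares a 3x3 corner patch of the generated image against hardcoded values:

import numpy as np

image_slice = image[0, -3:, -3:, -1].flatten()  # bottom-right 3x3 patch of the last channel, 9 values
assert np.abs(image_slice - expected_slice).max() < 0.003  # maximum elementwise deviation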


@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
@@ -22,7 +21,12 @@ from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokeni
from diffusers import AmusedImg2ImgPipeline, AmusedScheduler, UVit2DModel, VQModel
from diffusers.utils import load_image
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
@@ -35,9 +39,7 @@ class AmusedImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = AmusedImg2ImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "latents"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
-required_optional_params = PipelineTesterMixin.required_optional_params - {
-    "latents",
-}
+required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
torch.manual_seed(0)
@@ -69,19 +71,15 @@ class AmusedImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
vqvae = VQModel(
act_fn="silu",
block_out_channels=[8],
-down_block_types=[
-    "DownEncoderBlock2D",
-],
+down_block_types=["DownEncoderBlock2D"],
in_channels=3,
latent_channels=8,
layers_per_block=1,
norm_num_groups=8,
-num_vq_embeddings=32,  # reducing this to 16 or 8 -> RuntimeError: "cdist_cuda" not implemented for 'Half'
+num_vq_embeddings=32,
out_channels=3,
sample_size=8,
-up_block_types=[
-    "UpDecoderBlock2D",
-],
+up_block_types=["UpDecoderBlock2D"],
mid_block_add_attention=False,
lookup_from_codebook=True,
)
@@ -100,7 +98,6 @@ class AmusedImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
)
text_encoder = CLIPTextModelWithProjection(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"transformer": transformer,
"scheduler": scheduler,
@@ -139,13 +136,11 @@ class AmusedImg2ImgPipelineSlowTests(unittest.TestCase):
def test_amused_256(self):
pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256")
pipe.to(torch_device)
image = (
load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg")
.resize((256, 256))
.convert("RGB")
)
image = pipe(
"winter mountains",
image,
@@ -153,24 +148,19 @@ class AmusedImg2ImgPipelineSlowTests(unittest.TestCase):
num_inference_steps=2,
output_type="np",
).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
-expected_slice = np.array([0.9993, 1.0, 0.9996, 1.0, 0.9995, 0.9925, 0.9990, 0.9954, 1.0])
-assert np.abs(image_slice - expected_slice).max() < 1e-2
+expected_slice = np.array([0.9993, 1.0, 0.9996, 1.0, 0.9995, 0.9925, 0.999, 0.9954, 1.0])
+assert np.abs(image_slice - expected_slice).max() < 0.01
def test_amused_256_fp16(self):
pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256", torch_dtype=torch.float16, variant="fp16")
pipe.to(torch_device)
image = (
load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg")
.resize((256, 256))
.convert("RGB")
)
image = pipe(
"winter mountains",
image,
@@ -178,24 +168,19 @@ class AmusedImg2ImgPipelineSlowTests(unittest.TestCase):
num_inference_steps=2,
output_type="np",
).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
-expected_slice = np.array([0.9980, 0.9980, 0.9940, 0.9944, 0.9960, 0.9908, 1.0, 1.0, 0.9986])
-assert np.abs(image_slice - expected_slice).max() < 1e-2
+expected_slice = np.array([0.998, 0.998, 0.994, 0.9944, 0.996, 0.9908, 1.0, 1.0, 0.9986])
+assert np.abs(image_slice - expected_slice).max() < 0.01
def test_amused_512(self):
pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-512")
pipe.to(torch_device)
image = (
load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg")
.resize((512, 512))
.convert("RGB")
)
image = pipe(
"winter mountains",
image,
@@ -203,23 +188,20 @@ class AmusedImg2ImgPipelineSlowTests(unittest.TestCase):
num_inference_steps=2,
output_type="np",
).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
-expected_slice = np.array([0.1344, 0.0985, 0.0, 0.1194, 0.1809, 0.0765, 0.0854, 0.1371, 0.0933])
+expected_slice = np.array([0.2809, 0.1879, 0.2027, 0.2418, 0.1852, 0.2145, 0.2484, 0.2425, 0.2317])
assert np.abs(image_slice - expected_slice).max() < 0.1
def test_amused_512_fp16(self):
pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16)
pipe.to(torch_device)
image = (
load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg")
.resize((512, 512))
.convert("RGB")
)
image = pipe(
"winter mountains",
image,
@@ -227,9 +209,8 @@ class AmusedImg2ImgPipelineSlowTests(unittest.TestCase):
num_inference_steps=2,
output_type="np",
).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
-expected_slice = np.array([0.1536, 0.1767, 0.0227, 0.1079, 0.2400, 0.1427, 0.1511, 0.1564, 0.1542])
+expected_slice = np.array([0.2795, 0.1867, 0.2028, 0.2450, 0.1856, 0.2140, 0.2473, 0.2406, 0.2313])
assert np.abs(image_slice - expected_slice).max() < 0.1


@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
@@ -22,7 +21,12 @@ from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokeni
from diffusers import AmusedInpaintPipeline, AmusedScheduler, UVit2DModel, VQModel
from diffusers.utils import load_image
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
@@ -35,9 +39,7 @@ class AmusedInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = AmusedInpaintPipeline
params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
-required_optional_params = PipelineTesterMixin.required_optional_params - {
-    "latents",
-}
+required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
torch.manual_seed(0)
@@ -50,7 +52,7 @@ class AmusedInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
micro_cond_embed_dim=10,
encoder_hidden_size=8,
vocab_size=32,
-codebook_size=32,  # codebook size needs to be consistent with num_vq_embeddings for inpaint tests
+codebook_size=32,
in_channels=8,
block_out_channels=8,
num_res_blocks=1,
@@ -69,19 +71,15 @@ class AmusedInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
vqvae = VQModel(
act_fn="silu",
block_out_channels=[8],
-down_block_types=[
-    "DownEncoderBlock2D",
-],
+down_block_types=["DownEncoderBlock2D"],
in_channels=3,
latent_channels=8,
layers_per_block=1,
norm_num_groups=8,
-num_vq_embeddings=32,  # reducing this to 16 or 8 -> RuntimeError: "cdist_cuda" not implemented for 'Half'
+num_vq_embeddings=32,
out_channels=3,
sample_size=8,
-up_block_types=[
-    "UpDecoderBlock2D",
-],
+up_block_types=["UpDecoderBlock2D"],
mid_block_add_attention=False,
lookup_from_codebook=True,
)
@@ -100,7 +98,6 @@ class AmusedInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
)
text_encoder = CLIPTextModelWithProjection(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"transformer": transformer,
"scheduler": scheduler,
@@ -143,13 +140,11 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
def test_amused_256(self):
pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-256")
pipe.to(torch_device)
image = (
load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg")
.resize((256, 256))
.convert("RGB")
)
mask_image = (
load_image(
"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png"
@@ -157,7 +152,6 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
.resize((256, 256))
.convert("L")
)
image = pipe(
"winter mountains",
image,
@@ -166,9 +160,7 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
num_inference_steps=2,
output_type="np",
).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
expected_slice = np.array([0.0699, 0.0716, 0.0608, 0.0715, 0.0797, 0.0638, 0.0802, 0.0924, 0.0634])
assert np.abs(image_slice - expected_slice).max() < 0.1
@@ -176,13 +168,11 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
def test_amused_256_fp16(self):
pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-256", variant="fp16", torch_dtype=torch.float16)
pipe.to(torch_device)
image = (
load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg")
.resize((256, 256))
.convert("RGB")
)
mask_image = (
load_image(
"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png"
@@ -190,7 +180,6 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
.resize((256, 256))
.convert("L")
)
image = pipe(
"winter mountains",
image,
@@ -199,23 +188,19 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
num_inference_steps=2,
output_type="np",
).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
-expected_slice = np.array([0.0735, 0.0749, 0.0650, 0.0739, 0.0805, 0.0667, 0.0802, 0.0923, 0.0622])
+expected_slice = np.array([0.0735, 0.0749, 0.065, 0.0739, 0.0805, 0.0667, 0.0802, 0.0923, 0.0622])
assert np.abs(image_slice - expected_slice).max() < 0.1
def test_amused_512(self):
pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-512")
pipe.to(torch_device)
image = (
load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg")
.resize((512, 512))
.convert("RGB")
)
mask_image = (
load_image(
"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png"
@@ -223,7 +208,6 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
.resize((512, 512))
.convert("L")
)
image = pipe(
"winter mountains",
image,
@@ -232,9 +216,7 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
num_inference_steps=2,
output_type="np",
).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0005, 0.0])
assert np.abs(image_slice - expected_slice).max() < 0.05
@@ -242,13 +224,11 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
def test_amused_512_fp16(self):
pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16)
pipe.to(torch_device)
image = (
load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg")
.resize((512, 512))
.convert("RGB")
)
mask_image = (
load_image(
"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png"
@@ -256,7 +236,6 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
.resize((512, 512))
.convert("L")
)
image = pipe(
"winter mountains",
image,
@@ -265,9 +244,8 @@ class AmusedInpaintPipelineSlowTests(unittest.TestCase):
num_inference_steps=2,
output_type="np",
).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
-expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0025, 0.0])
-assert np.abs(image_slice - expected_slice).max() < 3e-3
+expected_slice = np.array([0.0227, 0.0157, 0.0098, 0.0213, 0.0250, 0.0127, 0.0280, 0.0380, 0.0095])
+assert np.abs(image_slice - expected_slice).max() < 0.003


@@ -73,7 +73,7 @@ def _test_stable_diffusion_compile(in_queue, out_queue, timeout):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.to("cuda")
pipe.set_progress_bar_config(disable=None)
@@ -715,7 +715,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -742,7 +742,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -769,7 +769,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -796,7 +796,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-mlsd")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -823,7 +823,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-normal")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -850,7 +850,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -877,7 +877,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -904,7 +904,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -935,7 +935,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
@@ -961,7 +961,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -993,7 +993,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
@@ -1035,7 +1035,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_shuffle")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -1081,7 +1081,7 @@ class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase):
controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=[controlnet_pose, controlnet_canny]
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=[controlnet_pose, controlnet_canny]
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
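The hunk above covers the multi-ControlNet test; every single-ControlNet test in this file gets the same base-model swap. For reference, the updated multi-ControlNet call reads roughly as follows:

from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=[controlnet_pose, controlnet_canny]
)
pipe.enable_model_cpu_offload()  # components are moved to the GPU only while they run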


@@ -407,7 +407,7 @@ class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)


@@ -459,7 +459,7 @@ class ControlNetInpaintPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None, controlnet=controlnet
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
@@ -504,7 +504,7 @@ class ControlNetInpaintPipelineSlowTests(unittest.TestCase):
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint")
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
"Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()


@@ -41,7 +41,7 @@ class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
"lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
"Jiali/stable-diffusion-1.5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params
@@ -86,7 +86,7 @@ class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
"lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
"Jiali/stable-diffusion-1.5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params


@@ -170,7 +170,7 @@ class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin):
def test_text_to_image(self):
image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
pipeline = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
"Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
)
pipeline.to(torch_device)
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
@@ -200,7 +200,7 @@ class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin):
def test_image_to_image(self):
image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
"Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
)
pipeline.to(torch_device)
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
@@ -232,7 +232,7 @@ class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin):
def test_inpainting(self):
image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
pipeline = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
"Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
)
pipeline.to(torch_device)
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
@@ -260,7 +260,7 @@ class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin):
def test_text_to_image_model_cpu_offload(self):
image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
pipeline = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
"Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
)
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipeline.to(torch_device)
@@ -287,7 +287,7 @@ class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin):
def test_text_to_image_full_face(self):
image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
pipeline = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
"Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
)
pipeline.to(torch_device)
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.bin")
@@ -304,7 +304,7 @@ class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin):
def test_unload(self):
image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
pipeline = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
"Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
)
before_processors = [attn_proc.__class__ for attn_proc in pipeline.unet.attn_processors.values()]
pipeline.to(torch_device)
@@ -323,7 +323,7 @@ class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin):
def test_multi(self):
image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
pipeline = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
"Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
)
pipeline.to(torch_device)
pipeline.load_ip_adapter(
@@ -343,7 +343,7 @@ class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin):
def test_text_to_image_face_id(self):
pipeline = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, torch_dtype=self.dtype
"Jiali/stable-diffusion-1.5", safety_checker=None, torch_dtype=self.dtype
)
pipeline.to(torch_device)
pipeline.load_ip_adapter(

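All of the IP-Adapter integration tests above swap only the base model; the adapter and image-encoder weights are untouched. The load pattern, sketched with a plain CLIPVisionModelWithProjection standing in for the tests' get_image_encoder() helper (an assumption about what that helper does):

from transformers import CLIPVisionModelWithProjection
from diffusers import StableDiffusionPipeline

image_encoder = CLIPVisionModelWithProjection.from_pretrained("h94/IP-Adapter", subfolder="models/image_encoder")
pipeline = StableDiffusionPipeline.from_pretrained(
    "Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None
)
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")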

@@ -224,7 +224,7 @@ class LEditsPPPipelineStableDiffusionSlowTests(unittest.TestCase):
def test_ledits_pp_editing(self):
pipe = LEditsPPPipelineStableDiffusion.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, torch_dtype=torch.float16
"Jiali/stable-diffusion-1.5", safety_checker=None, torch_dtype=torch.float16
)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)


@@ -33,7 +33,6 @@ from diffusers.utils import logging
from diffusers.utils.testing_utils import (
CaptureLogger,
enable_full_determinism,
-print_tensor_test,
torch_device,
)
@@ -173,7 +172,6 @@ class PixArtSigmaPAGPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
inputs = self.get_dummy_inputs(device)
image = pipe_pag(**inputs).images
image_slice = image[0, -3:, -3:, -1]
-print_tensor_test(image_slice)
assert image.shape == (
1,


@@ -283,7 +283,7 @@ class StableDiffusionPAGPipelineFastTests(
@require_torch_gpu
class StableDiffusionPAGPipelineIntegrationTests(unittest.TestCase):
pipeline_class = StableDiffusionPAGPipeline
repo_id = "runwayml/stable-diffusion-v1-5"
repo_id = "Jiali/stable-diffusion-1.5"
def setUp(self):
super().setUp()


@@ -287,7 +287,7 @@ class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
def test_positive_guidance(self):
torch_device = "cuda"
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -370,7 +370,7 @@ class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
def test_negative_guidance(self):
torch_device = "cuda"
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -453,7 +453,7 @@ class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
def test_multi_cond_guidance(self):
torch_device = "cuda"
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -536,7 +536,7 @@ class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
def test_guidance_fp16(self):
torch_device = "cuda"
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)


@@ -250,10 +250,10 @@ class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
def test_inference_ddim(self):
ddim_scheduler = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
"Jiali/stable-diffusion-1.5", subfolder="scheduler", revision="onnx"
)
sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
"Jiali/stable-diffusion-1.5",
revision="onnx",
scheduler=ddim_scheduler,
safety_checker=None,
@@ -276,10 +276,10 @@ class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
def test_inference_k_lms(self):
lms_scheduler = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
"Jiali/stable-diffusion-1.5", subfolder="scheduler", revision="onnx"
)
sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
"Jiali/stable-diffusion-1.5",
revision="onnx",
scheduler=lms_scheduler,
safety_checker=None,
@@ -327,7 +327,7 @@ class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
test_callback_fn.has_been_called = False
pipe = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
"Jiali/stable-diffusion-1.5",
revision="onnx",
safety_checker=None,
feature_extractor=None,
@@ -352,7 +352,7 @@ class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
def test_stable_diffusion_no_safety_checker(self):
pipe = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
"Jiali/stable-diffusion-1.5",
revision="onnx",
safety_checker=None,
feature_extractor=None,


@@ -210,10 +210,10 @@ class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
)
init_image = init_image.resize((768, 512))
lms_scheduler = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
"Jiali/stable-diffusion-1.5", subfolder="scheduler", revision="onnx"
)
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
"Jiali/stable-diffusion-1.5",
revision="onnx",
scheduler=lms_scheduler,
safety_checker=None,


@@ -68,7 +68,7 @@ class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting",
"botp/stable-diffusion-v1-5-inpainting",
revision="onnx",
safety_checker=None,
feature_extractor=None,
@@ -107,10 +107,10 @@ class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
lms_scheduler = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
"botp/stable-diffusion-v1-5-inpainting", subfolder="scheduler", revision="onnx"
)
pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting",
"botp/stable-diffusion-v1-5-inpainting",
revision="onnx",
scheduler=lms_scheduler,
safety_checker=None,


@@ -1332,7 +1332,7 @@ class StableDiffusionPipelineCkptTests(unittest.TestCase):
def test_download_from_hub(self):
ckpt_paths = [
"https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors",
"https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors",
"https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors",
]
@@ -1346,8 +1346,8 @@ class StableDiffusionPipelineCkptTests(unittest.TestCase):
assert image_out.shape == (512, 512, 3)
def test_download_local(self):
ckpt_filename = hf_hub_download("runwayml/stable-diffusion-v1-5", filename="v1-5-pruned-emaonly.safetensors")
config_filename = hf_hub_download("runwayml/stable-diffusion-v1-5", filename="v1-inference.yaml")
ckpt_filename = hf_hub_download("Jiali/stable-diffusion-1.5", filename="v1-5-pruned-emaonly.safetensors")
config_filename = hf_hub_download("Jiali/stable-diffusion-1.5", filename="v1-inference.yaml")
pipe = StableDiffusionPipeline.from_single_file(
ckpt_filename, config_files={"v1": config_filename}, torch_dtype=torch.float16
@@ -1402,7 +1402,7 @@ class StableDiffusionPipelineNightlyTests(unittest.TestCase):
assert max_diff < 1e-3
def test_stable_diffusion_1_5_pndm(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(torch_device)
sd_pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5").to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
@@ -1483,9 +1483,9 @@ class StableDiffusionPipelineDeviceMapTests(unittest.TestCase):
return inputs
def get_pipeline_output_without_device_map(self):
-sd_pipe = StableDiffusionPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
-).to(torch_device)
+sd_pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16).to(
+    torch_device
+)
sd_pipe.set_progress_bar_config(disable=True)
inputs = self.get_inputs()
no_device_map_image = sd_pipe(**inputs).images
@@ -1498,7 +1498,7 @@ class StableDiffusionPipelineDeviceMapTests(unittest.TestCase):
no_device_map_image = self.get_pipeline_output_without_device_map()
sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
"Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
)
sd_pipe_with_device_map.set_progress_bar_config(disable=True)
inputs = self.get_inputs()
@@ -1509,7 +1509,7 @@ class StableDiffusionPipelineDeviceMapTests(unittest.TestCase):
def test_components_put_in_right_devices(self):
sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
"Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
)
assert len(set(sd_pipe_with_device_map.hf_device_map.values())) >= 2
@@ -1518,7 +1518,7 @@ class StableDiffusionPipelineDeviceMapTests(unittest.TestCase):
no_device_map_image = self.get_pipeline_output_without_device_map()
sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
"Jiali/stable-diffusion-1.5",
device_map="balanced",
max_memory={0: "1GB", 1: "1GB"},
torch_dtype=torch.float16,
@@ -1532,7 +1532,7 @@ class StableDiffusionPipelineDeviceMapTests(unittest.TestCase):
def test_reset_device_map(self):
sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
"Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
)
sd_pipe_with_device_map.reset_device_map()
@@ -1544,7 +1544,7 @@ class StableDiffusionPipelineDeviceMapTests(unittest.TestCase):
def test_reset_device_map_to(self):
sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
"Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
)
sd_pipe_with_device_map.reset_device_map()
@@ -1556,7 +1556,7 @@ class StableDiffusionPipelineDeviceMapTests(unittest.TestCase):
def test_reset_device_map_enable_model_cpu_offload(self):
sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
"Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
)
sd_pipe_with_device_map.reset_device_map()
@@ -1568,7 +1568,7 @@ class StableDiffusionPipelineDeviceMapTests(unittest.TestCase):
def test_reset_device_map_enable_sequential_cpu_offload(self):
sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
"Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
)
sd_pipe_with_device_map.reset_device_map()
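These device-map tests load the pipeline sharded across devices with device_map="balanced", then undo the sharding with reset_device_map() before enabling an offloading strategy. A condensed sketch of the round-trip they exercise:

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
)
assert len(set(pipe.hf_device_map.values())) >= 2  # components spread over at least two devices
pipe.reset_device_map()  # detach the device map...
pipe.enable_model_cpu_offload()  # ...so offloading can be enabled afterwards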


@@ -566,7 +566,7 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
assert module.device == torch.device("cpu")
def test_img2img_2nd_order(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -630,7 +630,7 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
def test_img2img_safety_checker_works(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -686,7 +686,7 @@ class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
return inputs
def test_img2img_pndm(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -701,7 +701,7 @@ class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
assert max_diff < 1e-3
def test_img2img_ddim(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -717,7 +717,7 @@ class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
assert max_diff < 1e-3
def test_img2img_lms(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -733,7 +733,7 @@ class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
assert max_diff < 1e-3
def test_img2img_dpm(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
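The img2img nightly tests above all follow the same recipe: load the mirror checkpoint, swap in a different scheduler built from the current scheduler's config, and compare against stored reference outputs. The scheduler-swap idiom, for reference:

from diffusers import DPMSolverMultistepScheduler, StableDiffusionImg2ImgPipeline

sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
# rebuild a different scheduler from the current one's config so shared settings carry over
sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)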


@@ -76,7 +76,7 @@ def _test_inpaint_compile(in_queue, out_queue, timeout):
inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed)
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.unet.set_default_attn_processor()
pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config)
@@ -628,7 +628,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
def test_stable_diffusion_inpaint_ddim(self):
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -645,7 +645,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
def test_stable_diffusion_inpaint_fp16(self):
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", torch_dtype=torch.float16, safety_checker=None
)
pipe.unet.set_default_attn_processor()
pipe.to(torch_device)
@@ -662,7 +662,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
def test_stable_diffusion_inpaint_pndm(self):
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
@@ -680,7 +680,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
def test_stable_diffusion_inpaint_k_lms(self):
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
@@ -702,7 +702,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
torch.cuda.reset_peak_memory_stats()
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None, torch_dtype=torch.float16
)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
@@ -728,7 +728,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
def test_stable_diffusion_inpaint_pil_input_resolution_test(self):
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
@@ -747,7 +747,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
def test_stable_diffusion_inpaint_strength_test(self):
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.unet.set_default_attn_processor()
@@ -767,7 +767,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
assert np.abs(expected_slice - image_slice).max() < 1e-3
def test_stable_diffusion_simple_inpaint_ddim(self):
pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
pipe = StableDiffusionInpaintPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None)
pipe.unet.set_default_attn_processor()
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -818,7 +818,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
def test_stable_diffusion_inpaint_ddim(self):
vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.vae = vae
pipe.unet.set_default_attn_processor()
@@ -840,7 +840,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
"cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16
)
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", torch_dtype=torch.float16, safety_checker=None
)
pipe.unet.set_default_attn_processor()
pipe.vae = vae
@@ -860,7 +860,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
def test_stable_diffusion_inpaint_pndm(self):
vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.unet.set_default_attn_processor()
pipe.vae = vae
@@ -881,7 +881,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
def test_stable_diffusion_inpaint_k_lms(self):
vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.unet.set_default_attn_processor()
pipe.vae = vae
@@ -906,7 +906,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
"cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16
)
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None, torch_dtype=torch.float16
)
pipe.vae = vae
pipe.set_progress_bar_config(disable=None)
@@ -930,7 +930,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
"cross-attention/asymmetric-autoencoder-kl-x-1-5",
)
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.vae = vae
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
@@ -951,7 +951,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
def test_stable_diffusion_inpaint_strength_test(self):
vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", safety_checker=None
"botp/stable-diffusion-v1-5-inpainting", safety_checker=None
)
pipe.unet.set_default_attn_processor()
pipe.vae = vae
@@ -973,7 +973,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
def test_stable_diffusion_simple_inpaint_ddim(self):
vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
pipe = StableDiffusionInpaintPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None)
pipe.vae = vae
pipe.unet.set_default_attn_processor()
pipe.to(torch_device)
@@ -993,7 +993,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
vae = AsymmetricAutoencoderKL.from_pretrained(
"cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16
)
filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt")
filename = hf_hub_download("botp/stable-diffusion-v1-5-inpainting", filename="sd-v1-5-inpainting.ckpt")
pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16)
pipe.vae = vae
@@ -1042,7 +1042,7 @@ class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase):
return inputs
def test_inpaint_ddim(self):
sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("botp/stable-diffusion-v1-5-inpainting")
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -1057,7 +1057,7 @@ class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase):
assert max_diff < 1e-3
def test_inpaint_pndm(self):
sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("botp/stable-diffusion-v1-5-inpainting")
sd_pipe.scheduler = PNDMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -1073,7 +1073,7 @@ class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase):
assert max_diff < 1e-3
def test_inpaint_lms(self):
sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("botp/stable-diffusion-v1-5-inpainting")
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -1089,7 +1089,7 @@ class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase):
assert max_diff < 1e-3
def test_inpaint_dpm(self):
sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("botp/stable-diffusion-v1-5-inpainting")
sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
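
A minimal sketch of the scheduler-swap pattern the four nightly inpaint tests above share (illustrative, not part of the diff): the checkpoint is loaded once and only the sampler changes between cases.

from diffusers import DPMSolverMultistepScheduler, StableDiffusionInpaintPipeline

# rebuild the scheduler from the pipeline's existing config instead of reloading weights
pipe = StableDiffusionInpaintPipeline.from_pretrained("botp/stable-diffusion-v1-5-inpainting")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)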

View File

@@ -23,7 +23,6 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
@@ -353,34 +352,6 @@ class StableDiffusion2PipelineSlowTests(unittest.TestCase):
expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
assert np.abs(image_slice - expected_slice).max() < 7e-3
def test_stable_diffusion_pndm(self):
pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
assert np.abs(image_slice - expected_slice).max() < 7e-3
def test_stable_diffusion_k_lms(self):
pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.10440, 0.13115, 0.11100, 0.10141, 0.11440, 0.07215, 0.11332, 0.09693, 0.10006])
assert np.abs(image_slice - expected_slice).max() < 3e-3
@require_torch_gpu
def test_stable_diffusion_attention_slicing(self):
torch.cuda.reset_peak_memory_stats()
@@ -413,124 +384,6 @@ class StableDiffusion2PipelineSlowTests(unittest.TestCase):
max_diff = numpy_cosine_similarity_distance(image.flatten(), image_sliced.flatten())
assert max_diff < 5e-3
def test_stable_diffusion_text2img_intermediate_state(self):
number_of_steps = 0
def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
[-0.3862, -0.4507, -1.1729, 0.0686, -1.1045, 0.7124, -1.8301, 0.1903, 1.2773]
)
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
elif step == 2:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
[0.2720, -0.1863, -0.7383, -0.5029, -0.7534, 0.3970, -0.7646, 0.4468, 1.2686]
)
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
callback_fn.has_been_called = False
pipe = StableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(torch_device, dtype=torch.float16)
pipe(**inputs, callback=callback_fn, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == inputs["num_inference_steps"]
@require_torch_gpu
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
inputs = self.get_inputs(torch_device, dtype=torch.float16)
_ = pipe(**inputs)
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.8 GB is allocated
assert mem_bytes < 2.8 * 10**9
@require_torch_gpu
def test_stable_diffusion_pipeline_with_model_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
inputs = self.get_inputs(torch_device, dtype=torch.float16)
# Normal inference
pipe = StableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base",
torch_dtype=torch.float16,
)
pipe.unet.set_default_attn_processor()
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
outputs = pipe(**inputs)
mem_bytes = torch.cuda.max_memory_allocated()
# With model offloading
# Reload but don't move to cuda
pipe = StableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base",
torch_dtype=torch.float16,
)
pipe.unet.set_default_attn_processor()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device, dtype=torch.float16)
outputs_offloaded = pipe(**inputs)
mem_bytes_offloaded = torch.cuda.max_memory_allocated()
images = outputs.images
images_offloaded = outputs_offloaded.images
max_diff = numpy_cosine_similarity_distance(images.flatten(), images_offloaded.flatten())
assert max_diff < 1e-3
assert mem_bytes_offloaded < mem_bytes
assert mem_bytes_offloaded < 3 * 10**9
for module in pipe.text_encoder, pipe.unet, pipe.vae:
assert module.device == torch.device("cpu")
# With attention slicing
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe.enable_attention_slicing()
_ = pipe(**inputs)
mem_bytes_slicing = torch.cuda.max_memory_allocated()
assert mem_bytes_slicing < mem_bytes_offloaded
@nightly
@require_torch_accelerator
@@ -554,27 +407,13 @@ class StableDiffusion2PipelineNightlyTests(unittest.TestCase):
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_stable_diffusion_2_0_default_ddim(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base").to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = sd_pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_2_text2img/stable_diffusion_2_0_base_ddim.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_stable_diffusion_2_1_default_pndm(self):
def test_stable_diffusion_2_1_default(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -583,70 +422,7 @@ class StableDiffusion2PipelineNightlyTests(unittest.TestCase):
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_2_text2img/stable_diffusion_2_1_base_pndm.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_stable_diffusion_ddim(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = sd_pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_2_text2img/stable_diffusion_2_1_base_ddim.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_stable_diffusion_lms(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = sd_pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_2_text2img/stable_diffusion_2_1_base_lms.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_stable_diffusion_euler(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = sd_pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_2_text2img/stable_diffusion_2_1_base_euler.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_stable_diffusion_dpm(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(
sd_pipe.scheduler.config, final_sigmas_type="sigma_min"
)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
inputs["num_inference_steps"] = 25
image = sd_pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_2_text2img/stable_diffusion_2_1_base_dpm_multi.npy"
"/stable_diffusion_2_text2img/stable_diffusion_2_0_pndm.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
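
The surviving nightly assertions all reduce to a max absolute pixel difference against a stored reference array; an equivalent helper, for illustration only:

import numpy as np

def max_pixel_diff(expected: np.ndarray, image: np.ndarray) -> float:
    # elementwise absolute difference, reduced to the single worst value
    return float(np.abs(expected - image).max())

# usage mirroring the tests above: assert max_pixel_diff(expected_image, image) < 1e-3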

View File

@@ -32,9 +32,6 @@ from transformers import (
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionDepth2ImgPipeline,
UNet2DConditionModel,
@@ -416,102 +413,6 @@ class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
assert np.abs(expected_slice - image_slice).max() < 6e-1
def test_stable_diffusion_depth2img_pipeline_k_lms(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth", safety_checker=None
)
pipe.unet.set_default_attn_processor()
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs).images
image_slice = image[0, 253:256, 253:256, -1].flatten()
assert image.shape == (1, 480, 640, 3)
expected_slice = np.array([0.6363, 0.6274, 0.6309, 0.6370, 0.6226, 0.6286, 0.6213, 0.6453, 0.6306])
assert np.abs(expected_slice - image_slice).max() < 8e-4
def test_stable_diffusion_depth2img_pipeline_ddim(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth", safety_checker=None
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs).images
image_slice = image[0, 253:256, 253:256, -1].flatten()
assert image.shape == (1, 480, 640, 3)
expected_slice = np.array([0.6424, 0.6524, 0.6249, 0.6041, 0.6634, 0.6420, 0.6522, 0.6555, 0.6436])
assert np.abs(expected_slice - image_slice).max() < 5e-4
def test_stable_diffusion_depth2img_intermediate_state(self):
number_of_steps = 0
def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 60, 80)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
[-0.7168, -1.5137, -0.1418, -2.9219, -2.7266, -2.4414, -2.1035, -3.0078, -1.7051]
)
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
elif step == 2:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 60, 80)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
[-0.7109, -1.5068, -0.1403, -2.9160, -2.7207, -2.4414, -2.1035, -3.0059, -1.7090]
)
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
callback_fn.has_been_called = False
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16
)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(dtype=torch.float16)
pipe(**inputs, callback=callback_fn, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 2
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16
)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
inputs = self.get_inputs(dtype=torch.float16)
_ = pipe(**inputs)
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
@nightly
@require_torch_gpu
@@ -535,14 +436,14 @@ class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
"prompt": "two tigers",
"image": init_image,
"generator": generator,
"num_inference_steps": 3,
"num_inference_steps": 2,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_depth2img_pndm(self):
def test_depth2img(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -556,52 +457,3 @@ class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_depth2img_ddim(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
image = pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_depth2img/stable_diffusion_2_0_ddim.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_img2img_lms(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
image = pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_depth2img/stable_diffusion_2_0_lms.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_img2img_dpm(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
inputs["num_inference_steps"] = 30
image = pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_depth2img/stable_diffusion_2_0_dpm_multi.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
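
For reference, the consolidated nightly depth2img test now exercises only the default scheduler at two steps; a minimal sketch of that reduced run (the image URL is an assumption for illustration):

from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
# assumed example input; the tests load their own fixture image
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png"
)
# fewer steps keep the nightly job cheap; strength matches the test inputs above
image = pipe("two tigers", image=init_image, num_inference_steps=2, strength=0.75).images[0]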

View File

@@ -265,18 +265,37 @@ class StableDiffusion3PipelineSlowTests(unittest.TestCase):
image_slice = image[0, :10, :10]
expected_slice = np.array(
[
[0.36132812, 0.30004883, 0.25830078],
[0.36669922, 0.31103516, 0.23754883],
[0.34814453, 0.29248047, 0.23583984],
[0.35791016, 0.30981445, 0.23999023],
[0.36328125, 0.31274414, 0.2607422],
[0.37304688, 0.32177734, 0.26171875],
[0.3671875, 0.31933594, 0.25756836],
[0.36035156, 0.31103516, 0.2578125],
[0.3857422, 0.33789062, 0.27563477],
[0.3701172, 0.31982422, 0.265625],
],
dtype=np.float32,
0.4648,
0.4404,
0.4177,
0.5063,
0.4800,
0.4287,
0.5425,
0.5190,
0.4717,
0.5430,
0.5195,
0.4766,
0.5361,
0.5122,
0.4612,
0.4871,
0.4749,
0.4058,
0.4756,
0.4678,
0.3804,
0.4832,
0.4822,
0.3799,
0.5103,
0.5034,
0.3953,
0.5073,
0.4839,
0.3884,
]
)
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
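
numpy_cosine_similarity_distance comes from diffusers' testing utilities; a rough standalone equivalent, to make the tolerances in these SD3 tests concrete:

import numpy as np

def cosine_similarity_distance(a: np.ndarray, b: np.ndarray) -> float:
    # 1 - cosine similarity of the flattened vectors; 0.0 means identical direction
    a, b = a.flatten(), b.flatten()
    return float(1.0 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))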

View File

@@ -238,20 +238,39 @@ class StableDiffusion3Img2ImgPipelineSlowTests(unittest.TestCase):
image_slice = image[0, :10, :10]
expected_slice = np.array(
[
[0.50097656, 0.44726562, 0.40429688],
[0.5048828, 0.45703125, 0.38110352],
[0.4987793, 0.45141602, 0.38134766],
[0.49682617, 0.45336914, 0.38354492],
[0.49804688, 0.4555664, 0.39379883],
[0.5083008, 0.4645996, 0.40039062],
[0.50341797, 0.46240234, 0.39770508],
[0.49926758, 0.4572754, 0.39575195],
[0.50634766, 0.46435547, 0.39794922],
[0.50341797, 0.4572754, 0.39746094],
],
dtype=np.float32,
0.5435,
0.4673,
0.5732,
0.4438,
0.3557,
0.4912,
0.4331,
0.3491,
0.4915,
0.4287,
0.3477,
0.4849,
0.4355,
0.3469,
0.4871,
0.4431,
0.3538,
0.4912,
0.4521,
0.3643,
0.5059,
0.4587,
0.3730,
0.5166,
0.4685,
0.3845,
0.5264,
0.4746,
0.3914,
0.5342,
]
)
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
assert max_diff < 1e-4, f"Outputs are not close enough, got {image_slice}"
assert max_diff < 1e-4, f"Outputs are not close enough, got {max_diff}"

View File

@@ -607,175 +607,14 @@ class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase):
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_adapter_color(self):
adapter_model = "TencentARC/t2iadapter_color_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "snail"
image_url = (
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/color.png"
)
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_color_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_depth(self):
adapter_model = "TencentARC/t2iadapter_depth_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "snail"
image_url = (
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/color.png"
)
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_color_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_depth_sd_v14(self):
adapter_model = "TencentARC/t2iadapter_depth_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "desk"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/desk_depth.png"
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_depth_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_depth_sd_v15(self):
adapter_model = "TencentARC/t2iadapter_depth_sd15v2"
sd_model = "runwayml/stable-diffusion-v1-5"
sd_model = "Jiali/stable-diffusion-1.5"
prompt = "desk"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/desk_depth.png"
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_depth_sd15v2.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_keypose_sd_v14(self):
adapter_model = "TencentARC/t2iadapter_keypose_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "person"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/person_keypose.png"
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_keypose_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_openpose_sd_v14(self):
adapter_model = "TencentARC/t2iadapter_openpose_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "person"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/iron_man_pose.png"
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_openpose_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_seg_sd_v14(self):
adapter_model = "TencentARC/t2iadapter_seg_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "motorcycle"
image_url = (
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motor.png"
)
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_seg_sd14v1.npy"
out_url = "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_adapter/sd_adapter_v15_zoe_depth.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
@@ -797,11 +636,11 @@ class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase):
def test_stable_diffusion_adapter_zoedepth_sd_v15(self):
adapter_model = "TencentARC/t2iadapter_zoedepth_sd15v1"
sd_model = "runwayml/stable-diffusion-v1-5"
sd_model = "Jiali/stable-diffusion-1.5"
prompt = "motorcycle"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motorcycle.png"
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_zoedepth_sd15v1.npy"
out_url = "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_adapter/sd_adapter_v15_zoe_depth.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
@@ -819,70 +658,13 @@ class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase):
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_canny_sd_v14(self):
adapter_model = "TencentARC/t2iadapter_canny_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "toy"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png"
input_channels = 1
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_canny_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_canny_sd_v15(self):
adapter_model = "TencentARC/t2iadapter_canny_sd15v2"
sd_model = "runwayml/stable-diffusion-v1-5"
sd_model = "Jiali/stable-diffusion-1.5"
prompt = "toy"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png"
input_channels = 1
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_canny_sd15v2.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_sketch_sd14(self):
adapter_model = "TencentARC/t2iadapter_sketch_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "cat"
image_url = (
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/edge.png"
)
input_channels = 1
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_sketch_sd14v1.npy"
out_url = "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_adapter/sd_adapter_v15_zoe_depth.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
@@ -906,7 +688,7 @@ class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase):
def test_stable_diffusion_adapter_sketch_sd15(self):
adapter_model = "TencentARC/t2iadapter_sketch_sd15v2"
sd_model = "runwayml/stable-diffusion-v1-5"
sd_model = "Jiali/stable-diffusion-1.5"
prompt = "cat"
image_url = (
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/edge.png"
@@ -933,25 +715,3 @@ class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase):
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_seg_sd14v1")
pipe = StableDiffusionAdapterPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", adapter=adapter, safety_checker=None
)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motor.png"
)
pipe(prompt="foo", image=image, num_inference_steps=2)
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 5 * 10**9
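
The offloading test above follows a reset-run-measure pattern for CUDA peak memory; the bookkeeping in isolation (any CUDA workload can stand in for the pipeline call):

import torch

if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    _ = torch.randn(1024, 1024, device="cuda") @ torch.randn(1024, 1024, device="cuda")
    mem_bytes = torch.cuda.max_memory_allocated()
    assert mem_bytes < 5 * 10**9  # same style of upper bound as the test above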

View File

@@ -277,7 +277,7 @@ class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
torch.cuda.empty_cache()
def test_harm_safe_stable_diffusion(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
sd_pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None)
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -338,7 +338,7 @@ class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_nudity_safe_stable_diffusion(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
sd_pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None)
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
@@ -392,7 +392,7 @@ class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_nudity_safetychecker_safe_stable_diffusion(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
sd_pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)

View File

@@ -58,7 +58,7 @@ class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
[prompt],
generator=generator,
guidance_scale=9.0,
num_inference_steps=20,
num_inference_steps=2,
height=512,
width=512,
output_type="np",
@@ -69,9 +69,7 @@ class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array(
[0.79600024, 0.796546, 0.80682373, 0.79428387, 0.7905743, 0.8008807, 0.786183, 0.7835959, 0.797892]
)
expected_slice = np.array([0.5420, 0.5038, 0.2439, 0.5371, 0.4660, 0.1906, 0.5221, 0.4290, 0.2566])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@@ -90,7 +88,7 @@ class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
[prompt],
generator=generator,
guidance_scale=7.5,
num_inference_steps=15,
num_inference_steps=2,
output_type="np",
use_karras_sigmas=True,
height=512,
@@ -102,9 +100,7 @@ class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array(
[0.9506951, 0.9527786, 0.95309967, 0.9511477, 0.952523, 0.9515326, 0.9511933, 0.9480397, 0.94930184]
)
expected_slice = np.array([0.6418, 0.6424, 0.6462, 0.6271, 0.6314, 0.6295, 0.6249, 0.6339, 0.6335])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@@ -124,7 +120,7 @@ class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
generator=torch.manual_seed(seed),
noise_sampler_seed=seed,
guidance_scale=9.0,
num_inference_steps=20,
num_inference_steps=2,
output_type="np",
height=512,
width=512,
@@ -134,7 +130,7 @@ class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
generator=torch.manual_seed(seed),
noise_sampler_seed=seed,
guidance_scale=9.0,
num_inference_steps=20,
num_inference_steps=2,
output_type="np",
height=512,
width=512,
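
use_karras_sigmas is passed per call in the K-diffusion pipeline above; for the standard pipelines the same schedule is enabled on the scheduler instead. A sketch under that assumption:

from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
# from_config accepts config overrides, so the Karras sigma schedule is a one-line switch
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True
)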

View File

@@ -40,7 +40,7 @@ from diffusers.utils.testing_utils import slow
PRETRAINED_MODEL_REPO_MAPPING = OrderedDict(
[
("stable-diffusion", "runwayml/stable-diffusion-v1-5"),
("stable-diffusion", "Jiali/stable-diffusion-1.5"),
("if", "DeepFloyd/IF-I-XL-v1.0"),
("kandinsky", "kandinsky-community/kandinsky-2-1"),
("kandinsky22", "kandinsky-community/kandinsky-2-2-decoder"),
@@ -539,7 +539,7 @@ class AutoPipelineIntegrationTest(unittest.TestCase):
def test_controlnet(self):
# test from_pretrained
model_repo = "runwayml/stable-diffusion-v1-5"
model_repo = "Jiali/stable-diffusion-1.5"
controlnet_repo = "lllyasviel/sd-controlnet-canny"
controlnet = ControlNetModel.from_pretrained(controlnet_repo, torch_dtype=torch.float16)
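
The controlnet case reduces to handing the ControlNet to from_pretrained; a minimal sketch with the mirror repo ids used in the diff:

import torch
from diffusers import AutoPipelineForText2Image, ControlNetModel

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
# the presence of a controlnet makes AutoPipeline resolve to StableDiffusionControlNetPipeline
pipe = AutoPipelineForText2Image.from_pretrained(
    "Jiali/stable-diffusion-1.5", controlnet=controlnet, torch_dtype=torch.float16
)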

View File

@@ -40,7 +40,7 @@ class TextToVideoZeroPipelineSlowTests(unittest.TestCase):
torch.cuda.empty_cache()
def test_full_model(self):
model_id = "runwayml/stable-diffusion-v1-5"
model_id = "Jiali/stable-diffusion-1.5"
pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
generator = torch.Generator(device="cuda").manual_seed(0)
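
These slow tests pin a torch.Generator seed so reruns are bit-identical; the principle in isolation:

import torch

gen = torch.Generator(device="cpu").manual_seed(0)
first = torch.randn(4, generator=gen)
gen = torch.Generator(device="cpu").manual_seed(0)
second = torch.randn(4, generator=gen)
assert torch.equal(first, second)  # same seed, same device -> identical draws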

View File

@@ -30,11 +30,11 @@ enable_full_determinism()
@require_torch_gpu
class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionControlNetPipeline
ckpt_path = "https://huggingface.co/Lykon/DreamShaper/blob/main/DreamShaper_8_pruned.safetensors"
ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
original_config = (
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
)
repo_id = "Lykon/dreamshaper-8"
repo_id = "Jiali/stable-diffusion-1.5"
def setUp(self):
super().setUp()

View File

@@ -29,9 +29,9 @@ enable_full_determinism()
@require_torch_gpu
class StableDiffusionControlNetInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionControlNetInpaintPipeline
ckpt_path = "https://huggingface.co/Lykon/DreamShaper/blob/main/DreamShaper_8_INPAINTING.inpainting.safetensors"
ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt"
original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml"
repo_id = "Lykon/dreamshaper-8-inpainting"
repo_id = "botp/stable-diffusion-v1-5-inpainting"
def setUp(self):
super().setUp()

View File

@@ -29,11 +29,11 @@ enable_full_determinism()
@require_torch_gpu
class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionControlNetPipeline
ckpt_path = "https://huggingface.co/Lykon/DreamShaper/blob/main/DreamShaper_8_pruned.safetensors"
ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
original_config = (
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
)
repo_id = "Lykon/dreamshaper-8"
repo_id = "Jiali/stable-diffusion-1.5"
def setUp(self):
super().setUp()

View File

@@ -23,11 +23,11 @@ enable_full_determinism()
@require_torch_gpu
class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionImg2ImgPipeline
ckpt_path = "https://huggingface.co/Lykon/DreamShaper/blob/main/DreamShaper_8_pruned.safetensors"
ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
original_config = (
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
)
repo_id = "Lykon/dreamshaper-8"
repo_id = "Jiali/stable-diffusion-1.5"
def setUp(self):
super().setUp()

View File

@@ -23,9 +23,9 @@ enable_full_determinism()
@require_torch_gpu
class StableDiffusionInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionInpaintPipeline
ckpt_path = "https://huggingface.co/Lykon/DreamShaper/blob/main/DreamShaper_8_INPAINTING.inpainting.safetensors"
ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt"
original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml"
repo_id = "Lykon/dreamshaper-8-inpainting"
repo_id = "botp/stable-diffusion-v1-5-inpainting"
def setUp(self):
super().setUp()
@@ -63,7 +63,7 @@ class StableDiffusionInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSin
def test_single_file_loading_4_channel_unet(self):
# Test loading single file inpaint with a 4 channel UNet
ckpt_path = "https://huggingface.co/Lykon/DreamShaper/blob/main/DreamShaper_8_pruned.safetensors"
ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
pipe = self.pipeline_class.from_single_file(ckpt_path)
assert pipe.unet.config.in_channels == 4
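
from_single_file accepts a Hub URL like the ckpt_path above or a local checkpoint, and the in_channels check is what separates a text2img UNet (4 latent channels) from a true inpainting UNet (9: latents, mask, and masked-image latents). A hedged sketch:

from diffusers import StableDiffusionInpaintPipeline

ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
pipe = StableDiffusionInpaintPipeline.from_single_file(ckpt_path)
print(pipe.unet.config.in_channels)  # 4 for this text2img checkpoint; sd-v1-5-inpainting reports 9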

View File

@@ -26,11 +26,11 @@ enable_full_determinism()
@require_torch_gpu
class StableDiffusionPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionPipeline
ckpt_path = "https://huggingface.co/Lykon/DreamShaper/blob/main/DreamShaper_8_pruned.safetensors"
ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
original_config = (
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
)
repo_id = "Lykon/dreamshaper-8"
repo_id = "Jiali/stable-diffusion-1.5"
def setUp(self):
super().setUp()