diff --git a/src/diffusers/pipelines/qwenimage/__init__.py b/src/diffusers/pipelines/qwenimage/__init__.py
index 0cd9ab40e8..2400632ba2 100644
--- a/src/diffusers/pipelines/qwenimage/__init__.py
+++ b/src/diffusers/pipelines/qwenimage/__init__.py
@@ -27,8 +27,8 @@ else:
     _import_structure["pipeline_qwenimage_controlnet"] = ["QwenImageControlNetPipeline"]
     _import_structure["pipeline_qwenimage_controlnet_inpaint"] = ["QwenImageControlNetInpaintPipeline"]
     _import_structure["pipeline_qwenimage_edit"] = ["QwenImageEditPipeline"]
-    _import_structure["pipeline_qwenimage_edit_plus"] = ["QwenImageEditPlusPipeline"]
     _import_structure["pipeline_qwenimage_edit_inpaint"] = ["QwenImageEditInpaintPipeline"]
+    _import_structure["pipeline_qwenimage_edit_plus"] = ["QwenImageEditPlusPipeline"]
     _import_structure["pipeline_qwenimage_img2img"] = ["QwenImageImg2ImgPipeline"]
     _import_structure["pipeline_qwenimage_inpaint"] = ["QwenImageInpaintPipeline"]
diff --git a/tests/pipelines/kandinsky/test_kandinsky.py b/tests/pipelines/kandinsky/test_kandinsky.py
index 911c6e49ae..9fa39b1bf5 100644
--- a/tests/pipelines/kandinsky/test_kandinsky.py
+++ b/tests/pipelines/kandinsky/test_kandinsky.py
@@ -18,11 +18,13 @@ import random
 import unittest

 import numpy as np
+import pytest
 import torch
 from transformers import XLMRobertaTokenizerFast

 from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
 from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
+from diffusers.utils import is_transformers_version

 from ...testing_utils import (
     backend_empty_cache,
@@ -215,6 +217,9 @@ class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         dummy = Dummies()
         return dummy.get_dummy_inputs(device=device, seed=seed)

+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky(self):
         device = "cpu"

diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py
index d744d10821..ca80461d87 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_combined.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py
@@ -16,8 +16,10 @@ import unittest

 import numpy as np
+import pytest

 from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline
+from diffusers.utils import is_transformers_version

 from ...testing_utils import enable_full_determinism, require_torch_accelerator, torch_device
 from ..test_pipelines_common import PipelineTesterMixin

@@ -73,6 +75,9 @@ class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase)
         )
         return inputs

+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky(self):
         device = "cpu"

@@ -181,6 +186,9 @@ class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.Te
         inputs.pop("negative_image_embeds")
         return inputs

+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky(self):
         device = "cpu"

@@ -292,6 +300,9 @@ class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.Te
         inputs.pop("negative_image_embeds")
         return inputs

+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky(self):
         device = "cpu"

diff --git a/tests/pipelines/kandinsky/test_kandinsky_img2img.py b/tests/pipelines/kandinsky/test_kandinsky_img2img.py
index 4074c8db22..6bcd9587f2 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_img2img.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_img2img.py
@@ -18,6 +18,7 @@ import random
 import unittest

 import numpy as np
+import pytest
 import torch
 from PIL import Image
 from transformers import XLMRobertaTokenizerFast
@@ -31,6 +32,7 @@ from diffusers import (
     VQModel,
 )
 from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
+from diffusers.utils import is_transformers_version

 from ...testing_utils import (
     backend_empty_cache,
@@ -237,6 +239,9 @@ class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         dummies = Dummies()
         return dummies.get_dummy_inputs(device=device, seed=seed)

+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky_img2img(self):
         device = "cpu"

diff --git a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
index b789a63cdd..6383ca71ef 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
@@ -18,12 +18,14 @@ import random
 import unittest

 import numpy as np
+import pytest
 import torch
 from PIL import Image
 from transformers import XLMRobertaTokenizerFast

 from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
 from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
+from diffusers.utils import is_transformers_version

 from ...testing_utils import (
     backend_empty_cache,
@@ -231,6 +233,9 @@ class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         dummies = Dummies()
         return dummies.get_dummy_inputs(device=device, seed=seed)

+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky_inpaint(self):
         device = "cpu"
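
For context (commentary, not part of the diff): the Kandinsky tests above are gated with a version-conditional `pytest.mark.xfail`, so they are only expected to fail when the installed `transformers` is at or above 4.56.2. Below is a minimal sketch of that same pattern, assuming `pytest`, `numpy`, and `diffusers` are installed; the test name, slice values, and tolerance are hypothetical placeholders and do not come from the repository.

```python
# Minimal sketch of the version-gated xfail pattern applied in the diff above.
# Assumptions: pytest, numpy, and diffusers are installed; the test name and
# the slice values below are illustrative placeholders only.
import numpy as np
import pytest

from diffusers.utils import is_transformers_version


@pytest.mark.xfail(
    condition=is_transformers_version(">=", "4.56.2"),  # marker only applies on newer transformers
    reason="Latest transformers changes the slices",
    strict=True,  # an unexpected pass is reported as a failure
)
def test_expected_slice_placeholder():
    # Stand-in for the real assertion: the Kandinsky fast tests compare a slice
    # of the generated image against hard-coded reference values.
    expected_slice = np.array([0.432, 0.472, 0.504])
    produced_slice = np.array([0.432, 0.472, 0.504])
    assert np.abs(produced_slice - expected_slice).max() < 1e-2
```

With `strict=True`, the test is reported as failed if it unexpectedly passes on `transformers >= 4.56.2`, which keeps the gate honest once the expected slices are eventually updated for the newer `transformers` behavior.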