mirror of https://github.com/huggingface/diffusers.git synced 2026-01-27 17:22:53 +03:00

style fixes

Gal Davidi
2025-10-26 16:46:42 +00:00
parent 9e253a7bb7
commit 371e5f511e
6 changed files with 12 additions and 22 deletions

View File

@@ -198,8 +198,8 @@ else:
             "AutoencoderOobleck",
             "AutoencoderTiny",
             "AutoModel",
-            "BriaTransformer2DModel",
             "BriaFiboTransformer2DModel",
+            "BriaTransformer2DModel",
             "CacheMixin",
             "ChromaTransformer2DModel",
             "CogVideoXTransformer3DModel",
@@ -393,8 +393,8 @@ except OptionalDependencyNotAvailable:
 else:
     _import_structure["modular_pipelines"].extend(
         [
-            "BriaFiboVLMPromptToJson",
             "BriaFiboGeminiPromptToJson",
+            "BriaFiboVLMPromptToJson",
             "FluxAutoBlocks",
             "FluxKontextAutoBlocks",
             "FluxKontextModularPipeline",
@@ -433,8 +433,8 @@ else:
             "AuraFlowPipeline",
             "BlipDiffusionControlNetPipeline",
             "BlipDiffusionPipeline",
-            "BriaPipeline",
             "BriaFiboPipeline",
+            "BriaPipeline",
             "ChromaImg2ImgPipeline",
             "ChromaPipeline",
             "CLIPImageProjection",
@@ -905,8 +905,8 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
             AutoencoderOobleck,
             AutoencoderTiny,
             AutoModel,
-            BriaTransformer2DModel,
             BriaFiboTransformer2DModel,
+            BriaTransformer2DModel,
             CacheMixin,
             ChromaTransformer2DModel,
             CogVideoXTransformer3DModel,
@@ -1108,8 +1108,8 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
             AudioLDM2UNet2DConditionModel,
             AudioLDMPipeline,
             AuraFlowPipeline,
-            BriaPipeline,
             BriaFiboPipeline,
+            BriaPipeline,
             ChromaImg2ImgPipeline,
             ChromaPipeline,
             CLIPImageProjection,

View File

@@ -175,8 +175,8 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
         from .transformers import (
             AllegroTransformer3DModel,
             AuraFlowTransformer2DModel,
-            BriaTransformer2DModel,
             BriaFiboTransformer2DModel,
+            BriaTransformer2DModel,
             ChromaTransformer2DModel,
             CogVideoXTransformer3DModel,
             CogView3PlusTransformer2DModel,

View File

@@ -21,8 +21,8 @@ except OptionalDependencyNotAvailable:
     _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
 else:
-    _import_structure["gemini_prompt_to_json"] = ["BriaFiboGeminiPromptToJson"]
     _import_structure["fibo_vlm_prompt_to_json"] = ["BriaFiboVLMPromptToJson"]
+    _import_structure["gemini_prompt_to_json"] = ["BriaFiboGeminiPromptToJson"]

 if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
     try:
@@ -31,8 +31,8 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
     except OptionalDependencyNotAvailable:
         from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
     else:
-        from .gemini_prompt_to_json import BriaFiboGeminiPromptToJson
         from .fibo_vlm_prompt_to_json import BriaFiboVLMPromptToJson
+        from .gemini_prompt_to_json import BriaFiboGeminiPromptToJson

 else:
     import sys

View File

@@ -277,16 +277,12 @@ def build_messages(
     if refine_image is None:
         base_prompt = (structured_prompt or "").strip()
         edits = (editing_instructions or "").strip()
-        formatted = textwrap.dedent(
-            f"""<refine> Input: {base_prompt} Editing instructions: {edits}"""
-        ).strip()
+        formatted = textwrap.dedent(f"""<refine> Input: {base_prompt} Editing instructions: {edits}""").strip()
         user_content.append({"type": "text", "text": formatted})
     else:
         user_content.append({"type": "image", "image": refine_image})
         edits = (editing_instructions or "").strip()
-        formatted = textwrap.dedent(
-            f"""<refine> Editing instructions: {edits}"""
-        ).strip()
+        formatted = textwrap.dedent(f"""<refine> Editing instructions: {edits}""").strip()
         user_content.append({"type": "text", "text": formatted})

     messages: List[Dict[str, Any]] = []

View File

@@ -18,6 +18,7 @@ import unittest
import torch
from diffusers import BriaFiboTransformer2DModel
from ...testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin
@@ -25,8 +26,6 @@ from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTest
enable_full_determinism()


class BriaFiboTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = BriaFiboTransformer2DModel
    main_input_name = "hidden_states"
@@ -57,7 +56,7 @@ class BriaFiboTransformerTests(ModelTesterMixin, unittest.TestCase):
             "img_ids": image_ids,
             "txt_ids": text_ids,
             "timestep": timestep,
-            "text_encoder_layers": [encoder_hidden_states[:,:,:32], encoder_hidden_states[:,:,:32]],
+            "text_encoder_layers": [encoder_hidden_states[:, :, :32], encoder_hidden_states[:, :, :32]],
         }

     @property

View File

@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
@@ -30,10 +29,8 @@ from diffusers.models.transformers.transformer_bria_fibo import BriaFiboTransfor
from tests.pipelines.test_pipelines_common import PipelineTesterMixin, to_np
from ...testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_torch_accelerator,
    slow,
    torch_device,
)
@@ -194,5 +191,3 @@ class BriaFiboPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    def test_save_load_dduf(self):
        pass