diff --git a/src/diffusers/models/transformers/transformer_bria_fibo.py b/src/diffusers/models/transformers/transformer_bria_fibo.py
index 343a7136a3..a4d33a2332 100644
--- a/src/diffusers/models/transformers/transformer_bria_fibo.py
+++ b/src/diffusers/models/transformers/transformer_bria_fibo.py
@@ -66,7 +66,7 @@ def _get_qkv_projections(attn: "BriaFiboAttention", hidden_states, encoder_hidde
     return _get_projections(attn, hidden_states, encoder_hidden_states)
 
 
-# Copied from diffusers.models.transformers.transformer_flux.FluxAttnProcessor FluxAttnProcessor->BriaFiboAttnProcessor, FluxAttention-> BriaFiboAttention
+# Inspired by diffusers.models.transformers.transformer_flux.FluxAttnProcessor FluxAttnProcessor->BriaFiboAttnProcessor, FluxAttention->BriaFiboAttention
 class BriaFiboAttnProcessor:
     _attention_backend = None
     _parallel_config = None
@@ -134,7 +134,7 @@ class BriaFiboAttnProcessor:
         return hidden_states
 
 
-# Copied from diffusers.models.transformers.transformer_flux.FluxAttention -> BriaFiboAttention
+# Inspired by diffusers.models.transformers.transformer_flux.FluxAttention -> BriaFiboAttention
 class BriaFiboAttention(torch.nn.Module, AttentionModuleMixin):
     _default_processor_cls = BriaFiboAttnProcessor
     _available_processors = [
diff --git a/src/diffusers/pipelines/bria_fibo/pipeline_bria_fibo.py b/src/diffusers/pipelines/bria_fibo/pipeline_bria_fibo.py
index 97e98c5f8d..ee7f9e20c0 100644
--- a/src/diffusers/pipelines/bria_fibo/pipeline_bria_fibo.py
+++ b/src/diffusers/pipelines/bria_fibo/pipeline_bria_fibo.py
@@ -353,7 +353,7 @@ class BriaFiboPipeline(DiffusionPipeline):
         return self._interrupt
 
     @staticmethod
-    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents
+    # Inspired by diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents FluxPipeline->BriaFiboPipeline._unpack_latents
     def _unpack_latents(latents, height, width, vae_scale_factor):
         batch_size, num_patches, channels = latents.shape
@@ -400,7 +400,7 @@ class BriaFiboPipeline(DiffusionPipeline):
         return latents
 
     @staticmethod
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline
+    # Inspired by diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline
     def _pack_latents(latents, batch_size, num_channels_latents, height, width):
         latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
         latents = latents.permute(0, 2, 4, 1, 3, 5)
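For reference on what the `_pack_latents` / `_unpack_latents` helpers annotated above do: they convert between spatial VAE latents of shape `(B, C, H, W)` and a sequence of 2x2-patch tokens of shape `(B, H/2 * W/2, C * 4)`, in the style of the Flux and QwenImage pipelines. Below is a minimal, self-contained sketch of that round trip; the function names, signatures, and the final `reshape` step are illustrative assumptions, not the pipeline's exact API.

```python
# Minimal sketch of Flux/QwenImage-style 2x2 latent packing (illustrative only;
# names and signatures are assumptions, not BriaFiboPipeline's actual methods).
import torch


def pack_latents(latents, batch_size, num_channels_latents, height, width):
    # (B, C, H, W) -> (B, H/2 * W/2, C * 4): each 2x2 spatial patch becomes one token
    latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
    latents = latents.permute(0, 2, 4, 1, 3, 5)
    return latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)


def unpack_latents(latents, height, width, num_channels_latents):
    # Inverse of pack_latents: (B, num_patches, C * 4) -> (B, C, H, W)
    batch_size = latents.shape[0]
    latents = latents.view(batch_size, height // 2, width // 2, num_channels_latents, 2, 2)
    latents = latents.permute(0, 3, 1, 4, 2, 5)
    return latents.reshape(batch_size, num_channels_latents, height, width)


# Round-trip check on a dummy latent tensor
x = torch.randn(1, 16, 64, 64)
packed = pack_latents(x, 1, 16, 64, 64)
assert packed.shape == (1, 32 * 32, 16 * 4)
assert torch.equal(unpack_latents(packed, 64, 64, 16), x)
```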