1
0
mirror of https://github.com/huggingface/diffusers.git synced 2026-01-27 17:22:53 +03:00

Refactor documentation comments in BriaFibo classes to indicate inspiration from existing implementations

- Updated comments in BriaFiboAttnProcessor, BriaFiboAttention, and BriaFiboPipeline to reflect that the code is inspired by other modules rather than copied.
- Enhanced clarity on the origins of the methods to maintain proper attribution.
This commit is contained in:
galbria
2025-10-28 10:01:07 +00:00
parent 455ae70b11
commit 94abe1c3b9
2 changed files with 4 additions and 4 deletions

View File

@@ -66,7 +66,7 @@ def _get_qkv_projections(attn: "BriaFiboAttention", hidden_states, encoder_hidde
return _get_projections(attn, hidden_states, encoder_hidden_states)
# Copied from diffusers.models.transformers.transformer_flux.FluxAttnProcessor FluxAttnProcessor->BriaFiboAttnProcessor, FluxAttention-> BriaFiboAttention
# Inspired by diffusers.models.transformers.transformer_flux.FluxAttnProcessor FluxAttnProcessor->BriaFiboAttnProcessor, FluxAttention-> BriaFiboAttention
class BriaFiboAttnProcessor:
_attention_backend = None
_parallel_config = None
@@ -134,7 +134,7 @@ class BriaFiboAttnProcessor:
return hidden_states
# Copied from diffusers.models.transformers.transformer_flux.FluxAttention -> BriaFiboAttention
# Inspired by diffusers.models.transformers.transformer_flux.FluxAttention -> BriaFiboAttention
class BriaFiboAttention(torch.nn.Module, AttentionModuleMixin):
_default_processor_cls = BriaFiboAttnProcessor
_available_processors = [

View File

@@ -353,7 +353,7 @@ class BriaFiboPipeline(DiffusionPipeline):
return self._interrupt
@staticmethod
# Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents
# Inspired by diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents FluxPipeline-> BriaFiboPipeline _unpack_latents
def _unpack_latents(latents, height, width, vae_scale_factor):
batch_size, num_patches, channels = latents.shape
@@ -400,7 +400,7 @@ class BriaFiboPipeline(DiffusionPipeline):
return latents
@staticmethod
# Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline
# Inspired by diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline
def _pack_latents(latents, batch_size, num_channels_latents, height, width):
latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
latents = latents.permute(0, 2, 4, 1, 3, 5)