
Skip PEFT LoRA Scaling if the scale is 1.0 (#7576)

* Skip scaling if scale is identity

* Move the weight == 1.0 check into scale_lora_layers and unscale_lora_layers

* Fix code style/quality

* Empty-Commit

---------

Co-authored-by: Steven Munn <stevenjmunn@gmail.com>
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
Co-authored-by: Steven Munn <5297082+stevenjlm@users.noreply.github.com>
Authored by Steven Munn on 2024-04-10 22:32:31 -07:00, committed by GitHub
parent 33c5d125cb
commit 42f25d601a
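
Rationale, for context: a PEFT LoRA layer contributes scaling * (B @ A) on top of the frozen base weight, and scaling a layer multiplies that stored scaling factor. A factor of exactly 1.0 therefore leaves every layer unchanged, so the module traversal can be skipped outright. A minimal numeric sketch of that identity (hypothetical tensors, not the PEFT internals):

    import torch

    # Illustration only: the LoRA contribution is scaling * (B @ A), so
    # multiplying the scaling factor by 1.0 cannot change the effective weight.
    W = torch.randn(16, 16)   # frozen base weight
    A = torch.randn(4, 16)    # LoRA down-projection
    B = torch.randn(16, 4)    # LoRA up-projection
    scaling = 0.5             # current LoRA scaling factor

    def effective_weight(scale_multiplier: float) -> torch.Tensor:
        return W + (scaling * scale_multiplier) * (B @ A)

    assert torch.allclose(effective_weight(1.0), W + scaling * (B @ A))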


@@ -64,9 +64,11 @@ def recurse_remove_peft_layers(model):
         module_replaced = False

         if isinstance(module, LoraLayer) and isinstance(module, torch.nn.Linear):
-            new_module = torch.nn.Linear(module.in_features, module.out_features, bias=module.bias is not None).to(
-                module.weight.device
-            )
+            new_module = torch.nn.Linear(
+                module.in_features,
+                module.out_features,
+                bias=module.bias is not None,
+            ).to(module.weight.device)
             new_module.weight = module.weight
             if module.bias is not None:
                 new_module.bias = module.bias
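
The hunk above only reformats how the replacement torch.nn.Linear is built; behavior is unchanged. A standalone sketch of that replacement pattern (hypothetical helper, not the diffusers API), showing that the rebuilt plain Linear reuses the original parameters and produces identical outputs:

    import torch

    def rebuild_plain_linear(module: torch.nn.Linear) -> torch.nn.Linear:
        # Same construction as in the diff: a fresh plain Linear on the same
        # device, reusing the wrapped module's weight and bias parameters.
        new_module = torch.nn.Linear(
            module.in_features,
            module.out_features,
            bias=module.bias is not None,
        ).to(module.weight.device)
        new_module.weight = module.weight
        if module.bias is not None:
            new_module.bias = module.bias
        return new_module

    original = torch.nn.Linear(8, 8)
    rebuilt = rebuild_plain_linear(original)
    x = torch.randn(2, 8)
    assert torch.allclose(original(x), rebuilt(x))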
@@ -110,6 +112,9 @@ def scale_lora_layers(model, weight):
     """
     from peft.tuners.tuners_utils import BaseTunerLayer

+    if weight == 1.0:
+        return
+
     for module in model.modules():
         if isinstance(module, BaseTunerLayer):
             module.scale_layer(weight)
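
In diffusers these helpers typically bracket a forward pass: scale before it, unscale after. A hedged usage sketch, assuming the helpers are importable from diffusers.utils and that text_encoder and input_ids come from an already loaded pipeline; with the common default lora_scale of 1.0, both calls now return immediately instead of walking every module:

    from diffusers.utils import scale_lora_layers, unscale_lora_layers

    def encode_with_lora_scale(text_encoder, input_ids, lora_scale: float = 1.0):
        # Hypothetical wrapper for illustration: scale the PEFT layers for the
        # duration of one forward pass, then restore them.
        scale_lora_layers(text_encoder, lora_scale)
        try:
            return text_encoder(input_ids)[0]
        finally:
            unscale_lora_layers(text_encoder, lora_scale)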
@@ -129,6 +134,9 @@ def unscale_lora_layers(model, weight: Optional[float] = None):
     """
     from peft.tuners.tuners_utils import BaseTunerLayer

+    if weight == 1.0:
+        return
+
     for module in model.modules():
         if isinstance(module, BaseTunerLayer):
             if weight is not None and weight != 0:
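
unscale_lora_layers mirrors the scale path: dividing by the same non-zero weight undoes a prior scaling (the None/zero cases take a separate branch, as the last context line shows), and a weight of exactly 1.0 never changes anything, which is what the new early return exploits. A tiny numeric sketch of that symmetry (plain floats standing in for a layer's scaling attribute):

    def scale(scaling: float, weight: float) -> float:
        return scaling if weight == 1.0 else scaling * weight

    def unscale(scaling: float, weight: float) -> float:
        return scaling if weight == 1.0 else scaling / weight

    s = 0.75
    assert unscale(scale(s, 2.0), 2.0) == s             # round-trips for non-zero weights
    assert scale(s, 1.0) == s and unscale(s, 1.0) == s  # 1.0 is the identity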