glibc/sysdeps/aarch64/setjmp.S
Yury Khrustalev c0f0db2d59 aarch64: simplify calls to __libc_arm_za_disable in assembly
There is no functional change in this patch.

We remove stores and loads to the stack, return address signing, and redundant
CFI directives before and after the call to __libc_arm_za_disable().

The __libc_arm_za_disable implementation follows a special calling convention
that avoids most of the operations that would be necessary for a call to a
normal function (see [1] for details).
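
For contrast, a call to a function with the standard AAPCS64 convention from
this point would need a frame, pointer signing and the matching CFI
bookkeeping. The sketch below is illustrative only, not the exact code the
patch removes; PACIASP/AUTIASP and the cfi_* macros are the usual glibc
sysdep helpers:

	PACIASP				/* Sign the return address.  */
	stp	x29, x30, [sp, #-16]!	/* Spill frame pointer and LR.  */
	cfi_adjust_cfa_offset (16)
	cfi_rel_offset (x29, 0)
	cfi_rel_offset (x30, 8)
	mov	x29, sp
	bl	__libc_arm_za_disable
	ldp	x29, x30, [sp], #16	/* Reload and drop the frame.  */
	cfi_adjust_cfa_offset (-16)
	cfi_restore (x29)
	cfi_restore (x30)
	AUTIASP				/* Authenticate the return address.  */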

First, we rely on __libc_arm_za_disable() not clobbering certain registers,
and we put the return address into one of these registers. It no longer needs
to be stored on the stack, so it does not need to be signed with PAC.

Second, as a result of the above, we don't need to update the CFI offset.
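
With the special convention none of that is needed: the callee is known to
preserve x13 (among other registers), so the return address can simply live
there for the duration of the call, with no stack traffic, PAC signing or CFA
adjustment. This is the sequence the patch installs, shown again in context
in setjmp.S below:

	mov	x13, x30		/* Keep the return address in x13.  */
	cfi_register (x30, x13)		/* Tell the unwinder where it lives.  */
	bl	__libc_arm_za_disable
	mov	x30, x13		/* x30 is the return address again.  */
	cfi_register (x13, x30)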

This patch provides a small optimisation by avoiding an unnecessary store and
load on the stack; it also simplifies the assembly code and CFI directives.

[1]: https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst

Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
2025-06-18 09:42:33 +01:00


/* Copyright (C) 1997-2025 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */
#include <sysdep.h>
#include <pointer_guard.h>
#include <jmpbuf-offsets.h>
#include <stap-probe.h>
/* Keep traditional entry points in with sigsetjmp(). */
ENTRY (setjmp)
	mov	x1, #1
	b	1f
END (setjmp)

ENTRY_ALIGN (_setjmp, 2)
	mov	x1, #0
	b	1f
END (_setjmp)
libc_hidden_def (_setjmp)

ENTRY_ALIGN (__sigsetjmp, 2)
1:
#if IS_IN(libc)
	/* Disable ZA state of SME in libc.a and libc.so, but not in ld.so.
	   The calling convention of __libc_arm_za_disable allows us to do
	   this without saving to and reading from the stack.  As a result
	   we also don't need to sign the return address and check it after
	   returning, because it is never stored on the stack.  The
	   cfi_register directives tell the unwinder that the return
	   address temporarily lives in x13.  */
	mov	x13, x30
	cfi_register (x30, x13)
	bl	__libc_arm_za_disable
	mov	x30, x13
	cfi_register (x13, x30)
#endif
	stp	x19, x20, [x0, #JB_X19<<3]
	stp	x21, x22, [x0, #JB_X21<<3]
	stp	x23, x24, [x0, #JB_X23<<3]
	stp	x25, x26, [x0, #JB_X25<<3]
	stp	x27, x28, [x0, #JB_X27<<3]
#ifdef PTR_MANGLE
	PTR_MANGLE (x4, x30, x3)
	stp	x29, x4, [x0, #JB_X29<<3]
#else
	stp	x29, x30, [x0, #JB_X29<<3]
#endif
	/* The setjmp probe takes 3 arguments: the address of the jump buffer
	   as the first argument (8@x0), the return value as the second
	   argument (-4@x1), and the target address (8@x30).  */
	LIBC_PROBE (setjmp, 3, 8@x0, -4@x1, 8@x30)
	stp	d8, d9, [x0, #JB_D8<<3]
	stp	d10, d11, [x0, #JB_D10<<3]
	stp	d12, d13, [x0, #JB_D12<<3]
	stp	d14, d15, [x0, #JB_D14<<3]
	/* GCS support.  */
	mov	x16, 1
	CHKFEAT_X16
	tbnz	x16, 0, L(gcs_done)
	MRS_GCSPR (x2)
	add	x2, x2, 8 /* GCS state right after setjmp returns.  */
	str	x2, [x0, #JB_GCSPR]
L(gcs_done):
	mov	x2, sp
#ifdef PTR_MANGLE
	PTR_MANGLE (x2, x2, x3)
#endif
	str	x2, [x0, #JB_SP<<3]
#if IS_IN (rtld)
	/* In ld.so we never save the signal mask.  */
	mov	w0, #0
	ret
#else
	b	C_SYMBOL_NAME(__sigjmp_save)
#endif
END (__sigsetjmp)
hidden_def (__sigsetjmp)