mirror of https://sourceware.org/git/glibc.git synced 2025-07-30 22:43:12 +03:00

Rewritten v9/64-bit sparc strcmp.

David S. Miller
2011-08-24 01:32:24 -07:00
parent 39dd69dfb9
commit ad69cc2652
2 changed files with 136 additions and 210 deletions

ChangeLog

@@ -1,3 +1,7 @@
+2011-08-24  David S. Miller  <davem@davemloft.net>
+
+	* sysdeps/sparc/sparc64/strcmp.S: Rewrite.
+
 2011-08-24  Andreas Schwab  <schwab@redhat.com>
 
 	* elf/Makefile: Add rules to build and run unload8 test.

sysdeps/sparc/sparc64/strcmp.S

@@ -1,9 +1,8 @@
 /* Compare two strings for differences.
    For SPARC v9.
-   Copyright (C) 1997, 1999, 2003 Free Software Foundation, Inc.
+   Copyright (C) 2011 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
-   Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
-   Jakub Jelinek <jj@ultra.linux.cz>.
+   Contributed by David S. Miller <davem@davemloft.net>
 
    The GNU C Library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
@@ -22,259 +21,182 @@
 #include <sysdep.h>
 #include <asm/asi.h>
 
 #ifndef XCC
 	.register	%g2, #scratch
 	.register	%g3, #scratch
 	.register	%g6, #scratch
 #endif
-/* Normally, this uses
-	((xword - 0x0101010101010101) & 0x8080808080808080) test
-   to find out if any byte in xword could be zero.  This is fast, but
-   also gives false alarm for any byte in range 0x81-0xff.  It does
-   not matter for correctness, as if this test tells us there could
-   be some zero byte, we check it byte by byte, but if bytes with
-   high bits set are common in the strings, then this will give poor
-   performance.  You can #define EIGHTBIT_NOT_RARE and the algorithm
-   will use one tick slower, but more precise test
-	((xword - 0x0101010101010101) & (~xword) & 0x8080808080808080),
-   which does not give any false alarms (but if some bits are set,
-   one cannot assume from it which bytes are zero and which are not).
-   It is yet to be measured, what is the correct default for glibc
-   in these days for an average user.
- */
+#define rSTR1	%o0
+#define rSTR2	%o1
+#define r0101	%o2	/* 0x0101010101010101 */
+#define r8080	%o3	/* 0x8080808080808080 */
+#define rSTRXOR	%o4
+#define rWORD1	%o5
+#define rTMP1	%g1
+#define rTMP2	%g2
+#define rWORD2	%g3
+#define rSLL	%g4
+#define rSRL	%g5
+#define rBARREL	%g6
+
+	/* There are two cases, either the two pointers are aligned
+	 * identically or they are not.  If they have the same
+	 * alignment we can use the normal full speed loop.  Otherwise
+	 * we have to use the barrel-shifter version.
+	 */
 	.text
 	.align	32
 ENTRY(strcmp)
-	sethi	%hi(0x01010101), %g1		/* IEU0 Group */
-	andcc	%o0, 7, %g0			/* IEU1 */
-	bne,pn	%icc, 7f			/* CTI */
-	or	%g1, %lo(0x01010101), %g1	/* IEU0 Group */
-	andcc	%o1, 7, %g3			/* IEU1 */
-	bne,pn	%icc, 9f			/* CTI */
-	sllx	%g1, 32, %g2			/* IEU0 Group */
-	ldx	[%o0], %o2			/* Load */
-	or	%g1, %g2, %g1			/* IEU0 Group */
-1:	ldx	[%o1], %o3			/* Load */
-	sub	%o1, %o0, %o1			/* IEU1 */
-	sllx	%g1, 7, %g2			/* IEU0 Group */
-2:	add	%o0, 8, %o0			/* IEU1 */
-	sub	%o2, %g1, %g3			/* IEU0 Group */
-	subcc	%o2, %o3, %g0			/* IEU1 */
-	bne,pn	%xcc, 13f			/* CTI */
-#ifdef EIGHTBIT_NOT_RARE
-	andn	%g3, %o2, %g4			/* IEU0 Group */
-	ldxa	[%o0] ASI_PNF, %o2		/* Load */
-	andcc	%g4, %g2, %g0			/* IEU1 Group */
-#else
-	ldxa	[%o0] ASI_PNF, %o2		/* Load Group */
-	andcc	%g3, %g2, %g0			/* IEU1 */
-#endif
-	be,a,pt	%xcc, 2b			/* CTI */
-	ldxa	[%o1 + %o0] ASI_PNF, %o3	/* Load Group */
-	addcc	%g3, %g1, %o4			/* IEU1 */
-	srlx	%g3, 32, %g3			/* IEU0 */
-	andcc	%g3, %g2, %g0			/* IEU1 Group */
-	be,pt	%xcc, 3f			/* CTI */
-	srlx	%o4, 56, %o5			/* IEU0 */
-	andcc	%o5, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4f			/* CTI */
-	srlx	%o4, 48, %o5			/* IEU0 */
-	andcc	%o5, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4f			/* CTI */
-	srlx	%o4, 40, %o5			/* IEU0 */
-	andcc	%o5, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4f			/* CTI */
-	srlx	%o4, 32, %o5			/* IEU0 */
-	andcc	%o5, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4f			/* CTI */
-3:	srlx	%o4, 24, %o5			/* IEU0 */
-	andcc	%o5, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4f			/* CTI */
-	srlx	%o4, 16, %o5			/* IEU0 */
-	andcc	%o5, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4f			/* CTI */
-	srlx	%o4, 8, %o5			/* IEU0 */
-	andcc	%o5, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4f			/* CTI */
-	andcc	%o4, 0xff, %g0			/* IEU1 Group */
-	bne,a,pn %icc, 2b			/* CTI */
-	ldxa	[%o1 + %o0] ASI_PNF, %o3	/* Load */
-4:	retl					/* CTI+IEU1 Group */
-	clr	%o0				/* IEU0 */
+	or	rSTR2, rSTR1, rTMP1
+	sethi	%hi(0x80808080), r8080
+
+	andcc	rTMP1, 0x7, %g0
+	bne,pn	%icc, .Lmaybe_barrel_shift
+	or	r8080, %lo(0x80808080), r8080
+	ldx	[rSTR1], rWORD1
+
+	sub	rSTR2, rSTR1, rSTR2
+	sllx	r8080, 32, rTMP1
+
+	ldx	[rSTR1 + rSTR2], rWORD2
+	or	r8080, rTMP1, r8080
+
+	ba,pt	%xcc, .Laligned_loop_entry
+	srlx	r8080, 7, r0101
+
 	.align	32
-13:	mov	0xff, %g6			/* IEU0 Group */
-#ifdef EIGHTBIT_NOT_RARE
-	andcc	%g4, %g2, %g0			/* IEU1 */
-#else
-	andcc	%g3, %g2, %g0			/* IEU1 */
-#endif
-	be,pt	%xcc, 25f			/* CTI */
-	addcc	%g3, %g1, %o4			/* IEU1 Group */
-	srlx	%g3, 32, %g3			/* IEU0 */
-	andcc	%g3, %g2, %g0			/* IEU1 Group */
-	be,pt	%xcc, 23f			/* CTI */
-	sllx	%g6, 56, %o5			/* IEU0 */
-	andcc	%o4, %o5, %g0			/* IEU1 Group */
-	be,pn	%xcc, 24f			/* CTI */
-	sllx	%g6, 48, %o5			/* IEU0 */
-	andcc	%o4, %o5, %g0			/* IEU1 Group */
-	be,pn	%xcc, 24f			/* CTI */
-	sllx	%g6, 40, %o5			/* IEU0 */
-	andcc	%o4, %o5, %g0			/* IEU1 Group */
-	be,pn	%xcc, 24f			/* CTI */
-	sllx	%g6, 32, %o5			/* IEU0 */
-	andcc	%o4, %o5, %g0			/* IEU1 Group */
-	be,pn	%xcc, 24f			/* CTI */
-23:	sllx	%g6, 24, %o5			/* IEU0 */
-	andcc	%o4, %o5, %g0			/* IEU1 Group */
-	be,pn	%icc, 24f			/* CTI */
-	sllx	%g6, 16, %o5			/* IEU0 */
-	andcc	%o4, %o5, %g0			/* IEU1 Group */
-	be,pn	%icc, 24f			/* CTI */
-	sllx	%g6, 8, %o5			/* IEU0 */
-	andcc	%o4, %o5, %g0			/* IEU1 Group */
-	be,pn	%icc, 24f			/* CTI */
-	mov	%g6, %o5			/* IEU0 */
-25:	cmp	%o4, %o3			/* IEU1 Group */
-5:	mov	-1, %o0				/* IEU0 */
-	retl					/* CTI+IEU1 Group */
-	movgu	%xcc, 1, %o0			/* Single Group */
-	.align	16
-24:	sub	%o5, 1, %g6			/* IEU0 Group */
-	clr	%o0				/* IEU1 */
-	or	%o5, %g6, %o5			/* IEU0 Group */
-	andn	%o4, %o5, %o4			/* IEU0 Group */
-	andn	%o3, %o5, %o3			/* IEU1 */
-	cmp	%o4, %o3			/* IEU1 Group */
-	movgu	%xcc, 1, %o0			/* Single Group */
-	retl					/* CTI+IEU1 Group */
-	movlu	%xcc, -1, %o0			/* Single Group */
-6:	retl					/* CTI+IEU1 Group */
-	mov	%o4, %o0			/* IEU0 */
-	.align	16
-7:	ldub	[%o0], %o2			/* Load */
-	add	%o0, 1, %o0			/* IEU1 */
-	ldub	[%o1], %o3			/* Load Group */
-	sllx	%g1, 32, %g2			/* IEU0 */
-8:	add	%o1, 1, %o1			/* IEU1 */
-	subcc	%o2, %o3, %o4			/* IEU1 Group */
-	bne,pn	%xcc, 6b			/* CTI */
-	lduba	[%o0] ASI_PNF, %o2		/* Load */
-	brz,pn	%o3, 4b				/* CTI+IEU1 Group */
-	lduba	[%o1] ASI_PNF, %o3		/* Load */
-	andcc	%o0, 7, %g0			/* IEU1 Group */
-	bne,a,pn %icc, 8b			/* CTI */
-	add	%o0, 1, %o0			/* IEU0 */
-	or	%g1, %g2, %g1			/* IEU0 Group */
-	andcc	%o1, 7, %g3			/* IEU1 */
-	be,a,pn	%icc, 1b			/* CTI */
-	ldxa	[%o0] ASI_PNF, %o2		/* Load Group */
-9:	sllx	%g3, 3, %g5			/* IEU0 */
-	mov	64, %o5				/* IEU1 */
-	sub	%o1, %g3, %o1			/* IEU0 Group */
-	sub	%o5, %g5, %o5			/* IEU1 */
-	ldxa	[%o1] ASI_PNF, %g6		/* Load Group */
-	or	%g1, %g2, %g1			/* IEU0 */
-	sub	%o1, %o0, %o1			/* IEU1 */
-	sllx	%g1, 7, %g2			/* IEU0 Group */
-	add	%o1, 8, %o1			/* IEU1 */
-	/* %g1 = 0101010101010101
-	 * %g2 = 8080808080800880
-	 * %g5 = number of bits to shift left
-	 * %o5 = number of bits to shift right */
-10:	sllx	%g6, %g5, %o3			/* IEU0 Group */
-	ldxa	[%o1 + %o0] ASI_PNF, %g6	/* Load */
-11:	srlx	%g6, %o5, %o4			/* IEU0 Group */
-	ldxa	[%o0] ASI_PNF, %o2		/* Load */
-	or	%o3, %o4, %o3			/* IEU1 */
-	add	%o0, 8, %o0			/* IEU0 Group */
-	subcc	%o2, %o3, %g0			/* IEU1 */
-#ifdef EIGHTBIT_NOT_RARE
-	sub	%o2, %g1, %g3			/* IEU0 Group */
-	bne,pn	%xcc, 13b			/* CTI */
-	andn	%g3, %o2, %g4			/* IEU0 Group */
-	andcc	%g4, %g2, %g0			/* IEU1 Group */
-	be,pt	%xcc, 10b			/* CTI */
-	srlx	%g4, 32, %g4			/* IEU0 */
-	andcc	%g4, %g2, %g0			/* IEU1 Group */
-#else
-	bne,pn	%xcc, 13b			/* CTI */
-	sub	%o2, %g1, %g3			/* IEU0 Group */
-	andcc	%g3, %g2, %g0			/* IEU1 Group */
-	be,pt	%xcc, 10b			/* CTI */
-	srlx	%g3, 32, %g3			/* IEU0 */
-	andcc	%g3, %g2, %g0			/* IEU1 Group */
-#endif
-	be,pt	%xcc, 12f			/* CTI */
-	srlx	%o2, 56, %g3			/* IEU0 */
-	andcc	%g3, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4b			/* CTI */
-	srlx	%o2, 48, %g3			/* IEU0 */
-	andcc	%g3, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4b			/* CTI */
-	srlx	%o2, 40, %g3			/* IEU0 */
-	andcc	%g3, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4b			/* CTI */
-	srlx	%o2, 32, %g3			/* IEU0 */
-	andcc	%g3, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4b			/* CTI */
-12:	srlx	%o2, 24, %g3			/* IEU0 */
-	andcc	%g3, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4b			/* CTI */
-	srlx	%o2, 16, %g3			/* IEU0 */
-	andcc	%g3, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4b			/* CTI */
-	srlx	%o2, 8, %g3			/* IEU0 */
-	andcc	%g3, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4b			/* CTI */
-	andcc	%o2, 0xff, %g0			/* IEU1 Group */
-	be,pn	%icc, 4b			/* CTI */
-	sllx	%g6, %g5, %o3			/* IEU0 */
-	ba,pt	%xcc, 11b			/* CTI Group */
-	ldxa	[%o1 + %o0] ASI_PNF, %g6	/* Load */
+.Laligned_loop_entry:
+.Laligned_loop:
+	add	rSTR1, 8, rSTR1
+
+	sub	rWORD1, r0101, rTMP2
+	xorcc	rWORD1, rWORD2, rSTRXOR
+	bne,pn	%xcc, .Lcommon_endstring
+	andn	r8080, rWORD1, rTMP1
+
+	ldxa	[rSTR1] ASI_PNF, rWORD1
+	andcc	rTMP1, rTMP2, %g0
+	be,a,pt	%xcc, .Laligned_loop
+	ldxa	[rSTR1 + rSTR2] ASI_PNF, rWORD2
+
+.Lcommon_equal:
+	retl
+	mov	0, %o0
+
+	/* All loops terminate here once they find an unequal word.
+	 * If a zero byte appears in the word before the first unequal
+	 * byte, we must report zero.  Otherwise we report '1' or '-1'
+	 * depending upon whether the first mis-matching byte is larger
+	 * in the first string or the second, respectively.
+	 *
+	 * First we compute a 64-bit mask value that has "0x01" in
+	 * each byte where a zero exists in rWORD1.  rSTRXOR holds the
+	 * value (rWORD1 ^ rWORD2).  Therefore, if considered as an
+	 * unsigned quantity, our "0x01" mask value is "greater than"
+	 * rSTRXOR then a zero terminating byte comes first and
+	 * therefore we report '0'.
+	 *
+	 * The formula for this mask is:
+	 *
+	 *	mask_tmp1 = ~rWORD1 & 0x8080808080808080;
+	 *	mask_tmp2 = ((rWORD1 & 0x7f7f7f7f7f7f7f7f) +
+	 *		     0x7f7f7f7f7f7f7f7f);
+	 *
+	 *	mask = ((mask_tmp1 & ~mask_tmp2) >> 7);
+	 */
+.Lcommon_endstring:
+	andn	rWORD1, r8080, rTMP2
+	or	r8080, 1, %o1
+
+	mov	1, %o0
+	sub	rTMP2, %o1, rTMP2
+
+	cmp	rWORD1, rWORD2
+	andn	rTMP1, rTMP2, rTMP1
+
+	movleu	%xcc, -1, %o0
+	srlx	rTMP1, 7, rTMP1
+
+	cmp	rTMP1, rSTRXOR
+	retl
+	movgu	%xcc, 0, %o0
+
+.Lmaybe_barrel_shift:
+	sub	rSTR2, rSTR1, rSTR2
+	sllx	r8080, 32, rTMP1
+
+	or	r8080, rTMP1, r8080
+	and	rSTR1, 0x7, rTMP2
+
+	srlx	r8080, 7, r0101
+	andn	rSTR1, 0x7, rSTR1
+
+	ldxa	[rSTR1] ASI_PNF, rWORD1
+	andcc	rSTR2, 0x7, rSLL
+	sll	rTMP2, 3, rSTRXOR
+
+	bne,pn	%icc, .Lneed_barrel_shift
+	mov	-1, rTMP1
+	ldxa	[rSTR1 + rSTR2] ASI_PNF, rBARREL
+
+	srlx	rTMP1, rSTRXOR, rTMP2
+
+	orn	rWORD1, rTMP2, rWORD1
+	ba,pt	%xcc, .Laligned_loop_entry
+	orn	rBARREL, rTMP2, rWORD2
+
+.Lneed_barrel_shift:
+	sllx	rSLL, 3, rSLL
+	andn	rSTR2, 0x7, rSTR2
+
+	ldxa	[rSTR1 + rSTR2] ASI_PNF, rBARREL
+	mov	64, rTMP2
+	sub	rTMP2, rSLL, rSRL
+
+	srlx	rTMP1, rSTRXOR, rTMP1
+	add	rSTR2, 8, rSTR2
+
+	orn	rWORD1, rTMP1, rWORD1
+	sllx	rBARREL, rSLL, rWORD2
+	ldxa	[rSTR1 + rSTR2] ASI_PNF, rBARREL
+
+	add	rSTR1, 8, rSTR1
+	sub	rWORD1, r0101, rTMP2
+
+	srlx	rBARREL, rSRL, rSTRXOR
+
+	or	rWORD2, rSTRXOR, rWORD2
+
+	orn	rWORD2, rTMP1, rWORD2
+	ba,pt	%xcc, .Lbarrel_shift_loop_entry
+	andn	r8080, rWORD1, rTMP1
+
+.Lbarrel_shift_loop:
+	sllx	rBARREL, rSLL, rWORD2
+	ldxa	[rSTR1 + rSTR2] ASI_PNF, rBARREL
+
+	add	rSTR1, 8, rSTR1
+	sub	rWORD1, r0101, rTMP2
+
+	srlx	rBARREL, rSRL, rSTRXOR
+	andn	r8080, rWORD1, rTMP1
+
+	or	rWORD2, rSTRXOR, rWORD2
+
+.Lbarrel_shift_loop_entry:
+	xorcc	rWORD1, rWORD2, rSTRXOR
+	bne,pn	%xcc, .Lcommon_endstring
+	andcc	rTMP1, rTMP2, %g0
+	be,a,pt	%xcc, .Lbarrel_shift_loop
+	ldxa	[rSTR1] ASI_PNF, rWORD1
+
+	retl
+	mov	0, %o0
 END(strcmp)
 libc_hidden_builtin_def (strcmp)
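
For reference, here is a minimal C sketch of the aligned code path: both
pointers share 8-byte alignment, each iteration loads one word per string,
and a mask with bit 7 set in every zero byte of the first word detects the
terminator (the "precise" test quoted in the removed comment, with no false
positives for bytes 0x81-0xff). The function name and the byte-by-byte
fallback on mismatch are illustrative only; the assembly above instead
compares the 0x01-per-zero-byte mask against rWORD1 ^ rWORD2 to finish
branch-free.

#include <stdint.h>
#include <string.h>

static int
strcmp_aligned_sketch (const char *s1, const char *s2)
{
  const uint64_t r8080 = 0x8080808080808080ULL;
  const uint64_t r7f7f = 0x7f7f7f7f7f7f7f7fULL;

  for (;;)
    {
      uint64_t w1, w2;
      /* Aligned 8-byte loads; like the assembly, this may inspect bytes
         past the terminator that lie within the same word.  */
      memcpy (&w1, s1, sizeof w1);
      memcpy (&w2, s2, sizeof w2);

      /* zero_mask has bit 7 set in exactly those bytes of w1 that are 0.  */
      uint64_t zero_mask = ~w1 & r8080 & ~((w1 & r7f7f) + r7f7f);

      if (w1 == w2)
        {
          if (zero_mask != 0)
            return 0;           /* both strings end in this word */
          s1 += 8;
          s2 += 8;
          continue;
        }

      /* Mismatch somewhere in this word: finish byte by byte.  */
      for (;; s1++, s2++)
        {
          unsigned char c1 = *(const unsigned char *) s1;
          unsigned char c2 = *(const unsigned char *) s2;
          if (c1 != c2)
            return c1 < c2 ? -1 : 1;
          if (c1 == '\0')
            return 0;
        }
    }
}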
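The barrel-shifter path handles the case where the two pointers are not
aligned identically: the second string's chunks are rebuilt from two
adjacent aligned loads, shifted towards each other and OR'd together, as
the sllx/srlx pairs on rBARREL do above. A small sketch of that merge step,
assuming a big-endian word layout as on SPARC; the helper name is mine, and
shift_left must be 8 times the byte misalignment, i.e. a multiple of 8
between 8 and 56:

static uint64_t
merge_misaligned (uint64_t first_word, uint64_t second_word,
                  unsigned int shift_left)
{
  /* Drop the leading bytes of first_word, then fill the tail of the
     chunk with the leading bytes of second_word.  */
  return (first_word << shift_left) | (second_word >> (64 - shift_left));
}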