mirror of
https://sourceware.org/git/glibc.git
synced 2025-12-24 17:51:17 +03:00
[x86] Add a feature bit: Fast_Unaligned_Copy
On AMD processors, memcpy optimized with unaligned SSE load is slower than memcpy optimized with aligned SSSE3, while other string functions are faster with unaligned SSE load. A feature bit, Fast_Unaligned_Copy, is added to select memcpy optimized with unaligned SSE load. [BZ #19583] * sysdeps/x86/cpu-features.c (init_cpu_features): Set Fast_Unaligned_Copy with Fast_Unaligned_Load for Intel processors. Set Fast_Copy_Backward for AMD Excavator processors. * sysdeps/x86/cpu-features.h (bit_arch_Fast_Unaligned_Copy): New. (index_arch_Fast_Unaligned_Copy): Likewise. * sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Check Fast_Unaligned_Copy instead of Fast_Unaligned_Load.
This commit is contained in:
@@ -35,6 +35,7 @@
|
||||
#define bit_arch_I686 (1 << 15)
|
||||
#define bit_arch_Prefer_MAP_32BIT_EXEC (1 << 16)
|
||||
#define bit_arch_Prefer_No_VZEROUPPER (1 << 17)
|
||||
#define bit_arch_Fast_Unaligned_Copy (1 << 18)
|
||||
|
||||
/* CPUID Feature flags. */
|
||||
|
||||
@@ -101,6 +102,7 @@
|
||||
# define index_arch_I686 FEATURE_INDEX_1*FEATURE_SIZE
|
||||
# define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1*FEATURE_SIZE
|
||||
# define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1*FEATURE_SIZE
|
||||
# define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1*FEATURE_SIZE
|
||||
|
||||
|
||||
# if defined (_LIBC) && !IS_IN (nonlib)
|
||||
@@ -265,6 +267,7 @@ extern const struct cpu_features *__get_cpu_features (void)
|
||||
# define index_arch_I686 FEATURE_INDEX_1
|
||||
# define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1
|
||||
# define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1
|
||||
# define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1
|
||||
|
||||
#endif /* !__ASSEMBLER__ */
|
||||
|
||||
|
||||
Reference in New Issue
Block a user