1
0
mirror of https://sourceware.org/git/glibc.git synced 2025-04-26 15:09:05 +03:00
glibc/sysdeps/tile/string-endian.h
Adhemerval Zanella 24d1d8ec9e Simplify tilegx sysdeps folder
With tilepro support removal we can now simplify internal tile support by
moving the directory structure to avoid the unnecessary directory levels
in tile/tilegx both on generic and linux folders.

Checked with a build for tilegx-linux-gnu and tilegx-linux-gnu-32 with
and without the patch; there is no difference in the generated binaries'
disassembly.

	* stdlib/bug-getcontext.c (do_test): Remove tilepro mention in
	comment.
	* sysdeps/tile/preconfigure: Remove tilegx folder.
	* sysdeps/tile/tilegx/Implies: Move definitions to ...
	* sysdeps/tile/Implies: ... here.
	* sysdeps/tile/tilegx/Makefile: Move rules to ...
	* sysdeps/tile/Makefile: ... here.
	* sysdeps/tile/tilegx/atomic-machine.h: Move definitions to ...
	* sysdeps/tile/atomic-machine.h: ... here.  Add include guards.
	* sysdeps/tile/tilegx/bits/wordsize.h: Move to ...
	* sysdeps/tile/bits/wordsize.h: ... here.
	* sysdeps/tile/tilegx/*: Move to ...
	* sysdeps/tile/*: ... here.
	* sysdeps/tile/tilegx/tilegx32/Implies: Move to ...
	* sysdeps/tile/tilegx32/Implies: ... here.
	* sysdeps/tile/tilegx/tilegx64/Implies: Move to ...
	* sysdeps/tile/tilegx64/Implies: ... here.
	* sysdeps/unix/sysv/linux/tile/tilegx/Makefile: Move definitions
	to ...
	* sysdeps/unix/sysv/linux/tile/Makefile: ... here.
	* sysdeps/unix/sysv/linux/tile/tilegx/*: Move to ...
	* sysdeps/unix/sysv/linux/tile/*: ... here.
	* sysdeps/unix/sysv/linux/tile/tilegx/tilegx32/*: Move to ...
	* sysdeps/unix/sysv/linux/tile/tilegx32/*: ... here.
	* sysdeps/unix/sysv/linux/tile/tilegx/tilegx64/*: Move to ...
	* sysdeps/unix/sysv/linux/tile/tilegx64/*: ... here.
2017-12-20 16:55:26 -02:00

85 lines
3.3 KiB
C

/* Copyright (C) 2011-2017 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<http://www.gnu.org/licenses/>. */
#include <endian.h>
#include <stdint.h>
/* Provide a set of macros to help keep endianness #ifdefs out of
   the string functions.

   MASK: Provide a mask based on the pointer alignment that
   sets up non-zero bytes before the beginning of the string.
   The MASK expression works because shift counts are taken mod 64.

   NULMASK: Clear bytes beyond a given point in the string.

   CFZ: Find the first zero bit in the 8 string bytes in a long.

   REVCZ: Find the last zero bit in the 8 string bytes in a long.

   STRSHIFT: Shift N bits towards the start of the string.

   Every macro argument is fully parenthesized so that compound
   expressions such as NULMASK (a | b) or MASK (p & 7) expand with the
   intended grouping.  */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define MASK(x) (__insn_shl(1ULL, ((x) << 3)) - 1)
#define NULMASK(x) ((2ULL << (x)) - 1)
#define CFZ(x) __insn_ctz(x)
#define REVCZ(x) __insn_clz(x)
#define STRSHIFT(x,n) ((x) >> (n))
#else
#define MASK(x) (__insn_shl(-2LL, ((-(x) << 3) - 1)))
#define NULMASK(x) (-2LL << (63 - (x)))
#define CFZ(x) __insn_clz(x)
#define REVCZ(x) __insn_ctz(x)
#define STRSHIFT(x,n) ((x) << (n))
#endif
/* Broadcast a byte across all eight bytes of a uint64_t.  The Byte
   Shuffle instruction uses each byte of its second operand as an index
   selecting a byte of the first operand; with all indices zero, every
   destination byte becomes a copy of the low byte.  */
static inline uint64_t
copy_byte (uint8_t b)
{
  uint64_t replicated = __insn_shufflebytes (b, 0, 0);
  return replicated;
}
/* Implement the byte vector instructions using extended assembly.
The __insn_OP() builtins are buggy in the upstream compiler;
see gcc bugzilla 78117. */
/* VECOP(OP) expands to two static inline wrappers around the tilegx
   byte-vector instruction OP:

     OP (a, b)    -- register/register form;
     OP##i (a, b) -- immediate form (emits the "OPi" mnemonic); the "I"
                     constraint requires B to be a constant in the
                     instruction's immediate range.

   NOTE(review): the asm is marked volatile even though these look like
   pure value computations -- presumably to keep the compiler from
   folding/moving them around the miscompilation cited above; confirm
   before relaxing.  */
#define VECOP(OP) \
static inline uint64_t OP (uint64_t a, uint64_t b) \
{ \
uint64_t result; \
asm volatile (#OP " %0, %1, %2" : "=r"(result) : "r"(a), "r"(b)); \
return result; \
} \
\
static inline uint64_t OP ## i (uint64_t a, uint64_t b) \
{ \
uint64_t result; \
asm volatile (#OP "i %0, %1, %2" : "=r"(result) : "r"(a), "I"(b)); \
return result; \
}
/* Instantiate the wrappers used by the tile string routines.  The
   "v1" prefix in the tilegx ISA denotes operations on the eight
   one-byte lanes of a 64-bit register: compare equal / less-than
   unsigned / not-equal, add, shift right logical, shift left.  */
VECOP(v1cmpeq)
VECOP(v1cmpltu)
VECOP(v1cmpne)
VECOP(v1add)
VECOP(v1shru)
VECOP(v1shl)