/* Copyright (C) 2000-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _LINUX_MIPS_MIPS32_SYSDEP_H
#define _LINUX_MIPS_MIPS32_SYSDEP_H 1

/* There is some commonality.  */
#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/mips/mips32/sysdep.h>

#include <tls.h>

/* In order to get __set_errno() definition in INLINE_SYSCALL.  */
#ifndef __ASSEMBLER__
#include <errno.h>
#endif

/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name
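/* For example, SYS_ify (sched_yield) expands to __NR_sched_yield, the
   number used in the kernel's own syscall table.  */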

#ifdef __ASSEMBLER__

/* We don't want the label for the error handler to be visible in the symbol
   table when we define it here.  */
#ifdef __PIC__
# define SYSCALL_ERROR_LABEL 99b
#endif

#else   /* ! __ASSEMBLER__ */

/* Define a macro which expands into the inline wrapper code for a system
   call.  */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...)                               \
  ({ INTERNAL_SYSCALL_DECL (_sc_err);					\
     long result_var = INTERNAL_SYSCALL (name, _sc_err, nr, args);	\
     if ( INTERNAL_SYSCALL_ERROR_P (result_var, _sc_err) )		\
       {								\
	 __set_errno (INTERNAL_SYSCALL_ERRNO (result_var, _sc_err));	\
	 result_var = -1L;						\
       }								\
     result_var; })
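
/* Illustrative sketch only (not part of this header): a hypothetical
   syscall wrapper built on INLINE_SYSCALL would look like

     int
     __dup (int fd)
     {
       return INLINE_SYSCALL (dup, 1, fd);
     }

   On error the macro stores the kernel's error code in errno and the
   whole expression evaluates to -1.  */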

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long err __attribute__ ((unused))

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err)   ((void) (val), (long) (err))

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err)     ((void) (err), val)
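
/* Callers that want the raw kernel result rather than errno use the
   INTERNAL_* macros directly; an illustrative sketch only (not part of
   this header, "fd" is a caller-provided descriptor):

     INTERNAL_SYSCALL_DECL (err);
     long ret = INTERNAL_SYSCALL (close, err, 1, fd);
     if (INTERNAL_SYSCALL_ERROR_P (ret, err))
       ret = -INTERNAL_SYSCALL_ERRNO (ret, err);

   In the standard MIPS variants below err receives the $a3 error flag
   and ret the $v0 result.  */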

/* Note that the original Linux syscall restart convention required the
   instruction immediately preceding SYSCALL to initialize $v0 with the
   syscall number.  Then if a restart triggered, $v0 would have been
   clobbered by the syscall interrupted, and needed to be reinitialized.
   The kernel would decrement the PC by 4 before switching back to the
   user mode so that $v0 had been reloaded before SYSCALL was executed
   again.  This implied the place $v0 was loaded from must have been
   preserved across a syscall, e.g. an immediate, static register, stack
   slot, etc.

   The convention was relaxed in Linux with a change applied to the kernel
   GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that
   first appeared in the 2.6.36 release.  Since then the kernel has had
   code that reloads $v0 upon syscall restart and resumes right at the
   SYSCALL instruction, so no special arrangement is needed anymore.

   For backwards compatibility with existing kernel binaries we support
   the old convention by choosing the instruction preceding SYSCALL
   carefully.  This also means we have to force a 32-bit encoding of the
   microMIPS MOVE instruction if one is used.  */

#ifdef __mips_micromips
# define MOVE32 "move32"
#else
# define MOVE32 "move"
#endif

#undef INTERNAL_SYSCALL
#undef INTERNAL_SYSCALL_NCS

#ifdef __mips16
/* There's no MIPS16 syscall instruction, so we go through out-of-line
   standard MIPS wrappers.  These do use inline snippets below though,
   through INTERNAL_SYSCALL_MIPS16.  Spilling the syscall number to
   memory gives the best code in that case, avoiding the need to save
   and restore a static register.  */

# include <mips16-syscall.h>

# define INTERNAL_SYSCALL(name, err, nr, args...)			\
	INTERNAL_SYSCALL_NCS (SYS_ify (name), err, nr, args)

# define INTERNAL_SYSCALL_NCS(number, err, nr, args...)			\
({									\
	union __mips16_syscall_return _sc_ret;				\
	_sc_ret.val = __mips16_syscall##nr (args, number);		\
	err = _sc_ret.reg.v1;						\
	_sc_ret.reg.v0;							\
})

# define INTERNAL_SYSCALL_MIPS16(number, err, nr, args...)		\
	internal_syscall##nr ("lw\t%0, %2\n\t",				\
			      "R" (number),				\
			      0, err, args)

#else /* !__mips16 */
# define INTERNAL_SYSCALL(name, err, nr, args...)			\
	internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t",	\
			      "IK" (SYS_ify (name)),			\
			      0, err, args)

# define INTERNAL_SYSCALL_NCS(number, err, nr, args...)			\
	internal_syscall##nr (MOVE32 "\t%0, %2\n\t",			\
			      "r" (__s0),				\
			      number, err, args)

#endif /* !__mips16 */

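/* The internal_syscall* macros below encode the o32 Linux syscall
   convention: the number reaches $v0 through the caller-supplied v0_init
   instruction and input operand, arguments 1-4 go in $a0-$a3, arguments
   5-7 are stored to 16($sp), 20($sp) and 24($sp), and on return $v0
   holds the result while $a3 is non-zero if an error occurred.  $s0 only
   serves as the preserved home of the syscall number for the restartable
   INTERNAL_SYSCALL_NCS variant above.  */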
#define internal_syscall0(v0_init, input, number, err, dummy...)	\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set reorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input								\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})

#define internal_syscall1(v0_init, input, number, err, arg1)		\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set reorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0)						\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})

#define internal_syscall2(v0_init, input, number, err, arg1, arg2)	\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0), "r" (__a1)					\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})

#define internal_syscall3(v0_init, input, number, err,			\
			  arg1, arg2, arg3)				\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})

#define internal_syscall4(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4)			\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7") = (long) (arg4);			\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
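
/* Note that from internal_syscall4 on $a3 carries the fourth argument
   into the kernel as well as the error flag back out, hence the
   "+r" (__a3) read-write constraint instead of "=r" above.  */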

/* We need to use a frame pointer for the functions in which we
   adjust $sp around the syscall, or debug information and unwind
   information will be $sp relative and thus wrong during the syscall.  As
   of GCC 4.7, this is sufficient.  */
#define FORCE_FRAME_POINTER						\
  void *volatile __fp_force __attribute__ ((unused)) = alloca (4)

#define internal_syscall5(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5)			\
({									\
	long _sys_result;						\
									\
	FORCE_FRAME_POINTER;						\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7") = (long) (arg4);			\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	"subu\t$29, 32\n\t"						\
	"sw\t%6, 16($29)\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	"addiu\t$29, 32\n\t"						\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2),			\
	  "r" ((long) (arg5))						\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})

#define internal_syscall6(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5, arg6)		\
({									\
	long _sys_result;						\
									\
	FORCE_FRAME_POINTER;						\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7") = (long) (arg4);			\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	"subu\t$29, 32\n\t"						\
	"sw\t%6, 16($29)\n\t"						\
	"sw\t%7, 20($29)\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	"addiu\t$29, 32\n\t"						\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2),			\
	  "r" ((long) (arg5)), "r" ((long) (arg6))			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})

#define internal_syscall7(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5, arg6, arg7)	\
({									\
	long _sys_result;						\
									\
	FORCE_FRAME_POINTER;						\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7") = (long) (arg4);			\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	"subu\t$29, 32\n\t"						\
	"sw\t%6, 16($29)\n\t"						\
	"sw\t%7, 20($29)\n\t"						\
	"sw\t%8, 24($29)\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	"addiu\t$29, 32\n\t"						\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2),			\
	  "r" ((long) (arg5)), "r" ((long) (arg6)), "r" ((long) (arg7))	\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})

#define __SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", \
	"$14", "$15", "$24", "$25", "hi", "lo", "memory"
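
/* The clobber list names the assembler temporary $1 ($at), $3 ($v1), the
   caller-saved temporaries $8-$15 and $24-$25 ($t0-$t9), and the hi/lo
   multiply/divide registers, none of which a Linux/MIPS syscall is
   expected to preserve; "memory" keeps the compiler from caching memory
   values across the kernel call.  */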

/* Standard MIPS syscalls have an error flag, and return a positive errno
   when the error flag is set. Emulate this behaviour for vsyscalls so that
   the INTERNAL_SYSCALL_{ERROR_P,ERRNO} macros work correctly.  */
#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...)		\
  ({									\
    long _ret = funcptr (args);						\
    err = ((unsigned long) (_ret) >= (unsigned long) -4095L);		\
    if (err)								\
      _ret = -_ret;							\
    _ret;								\
  })
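
/* A vsyscall follows the usual kernel convention of returning -errno in
   the range [-4095, -1] on failure; e.g. a return value of -22 (-EINVAL)
   becomes err = 1 and _ret = 22, matching what the syscall instruction
   path above produces.  */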

/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETTIME_VSYSCALL	1
#define HAVE_GETTIMEOFDAY_VSYSCALL	1

#endif /* __ASSEMBLER__ */

/* Pointer mangling is not yet supported for MIPS.  */
#define PTR_MANGLE(var) (void) (var)
#define PTR_DEMANGLE(var) (void) (var)

#endif /* linux/mips/mips32/sysdep.h */