/* Add two limb vectors of the same length > 0 and store sum in a third
   limb vector.
   Copyright (C) 1992-2017 Free Software Foundation, Inc.
   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or (at your
   option) any later version.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the GNU MP Library; see the file COPYING.LIB.  If not,
   see <http://www.gnu.org/licenses/>.  */

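/* Rough C-level picture of this routine (a sketch, not a declaration taken
   from this file):

     mp_limb_t __mpn_add_n (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
                            const mp_limb_t *s2_ptr, mp_size_t size);

   It adds the SIZE-limb numbers at S1 and S2, stores the SIZE low limbs of
   the sum at RES, and returns the carry out of the most significant limb.  */
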
#include "sysdep.h"
 | 
						|
#include "asm-syntax.h"
 | 
						|
 | 
						|
#define PARMS	4+8	/* space for 2 saved regs */
 | 
						|
#define RES	PARMS
 | 
						|
#define S1	RES+4
 | 
						|
#define S2	S1+4
 | 
						|
#define SIZE	S2+4
 | 
						|
 | 
						|
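/* Arguments are read relative to %esp after the two pushes below:
   PARMS = 4 (return address) + 8 (saved %edi and %esi), so the first
   argument lives at 12(%esp).  */
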
	.text
ENTRY (__mpn_add_n)

	pushl %edi
	cfi_adjust_cfa_offset (4)
	pushl %esi
	cfi_adjust_cfa_offset (4)

	movl RES(%esp),%edi
	cfi_rel_offset (edi, 4)
	movl S1(%esp),%esi
	cfi_rel_offset (esi, 0)
	movl S2(%esp),%edx
	movl SIZE(%esp),%ecx
	movl	%ecx,%eax
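/* The loop below adds 8 limbs per pass.  %ecx receives the number of full
   passes (size / 8) and %eax receives (-size) & 7, the number of limb slots
   to skip on the first, partial pass.  The three pointers are backed up by
   4 bytes per skipped slot so the fixed displacements inside the loop still
   address the right limbs on that first pass.  */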
	shrl	$3,%ecx			/* compute count for unrolled loop */
	negl	%eax
	andl	$7,%eax			/* get index where to start loop */
	jz	L(oop)			/* necessary special case for 0 */
	incl	%ecx			/* adjust loop count */
	shll	$2,%eax			/* adjustment for pointers... */
	subl	%eax,%edi		/* ... since they are offset ... */
	subl	%eax,%esi		/* ... by a constant when we ... */
	subl	%eax,%edx		/* ... enter the loop */
	shrl	$2,%eax			/* restore previous value */
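/* Compute the entry point inside the unrolled loop.  With the instruction
   encodings the assembler picks here, every three-instruction limb block is
   9 bytes except the first one (its zero displacements make it 6 bytes), so
   skipping %eax blocks means entering at L(oop) + 9*%eax - 3.  */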
#ifdef PIC
/* Calculate start address in loop for PIC.  Due to limitations in some
   assemblers, Loop-L0-3 cannot be put into the leal */
	call	L(0)
	cfi_adjust_cfa_offset (4)
L(0):	leal	(%eax,%eax,8),%eax
	addl	(%esp),%eax
	addl	$(L(oop)-L(0)-3),%eax
	addl	$4,%esp
	cfi_adjust_cfa_offset (-4)
#else
/* Calculate start address in loop for non-PIC.  */
	leal	(L(oop) - 3)(%eax,%eax,8),%eax
#endif
	jmp	*%eax			/* jump into loop */
	ALIGN (3)
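/* The pointer updates use leal and the counter uses decl so that the carry
   produced by one adcl chain survives into the next pass: leal changes no
   flags and decl leaves CF untouched.  */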
L(oop):	movl	(%esi),%eax
	adcl	(%edx),%eax
	movl	%eax,(%edi)
	movl	4(%esi),%eax
	adcl	4(%edx),%eax
	movl	%eax,4(%edi)
	movl	8(%esi),%eax
	adcl	8(%edx),%eax
	movl	%eax,8(%edi)
	movl	12(%esi),%eax
	adcl	12(%edx),%eax
	movl	%eax,12(%edi)
	movl	16(%esi),%eax
	adcl	16(%edx),%eax
	movl	%eax,16(%edi)
	movl	20(%esi),%eax
	adcl	20(%edx),%eax
	movl	%eax,20(%edi)
	movl	24(%esi),%eax
	adcl	24(%edx),%eax
	movl	%eax,24(%edi)
	movl	28(%esi),%eax
	adcl	28(%edx),%eax
	movl	%eax,28(%edi)
	leal	32(%edi),%edi
	leal	32(%esi),%esi
	leal	32(%edx),%edx
	decl	%ecx
	jnz	L(oop)

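/* The final carry is still in CF: sbbl turns it into 0 or -1 in %eax and
   negl turns that into the function's 0-or-1 return value.  */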
	sbbl	%eax,%eax
	negl	%eax

	popl %esi
	cfi_adjust_cfa_offset (-4)
	cfi_restore (esi)
	popl %edi
	cfi_adjust_cfa_offset (-4)
	cfi_restore (edi)

	ret
END (__mpn_add_n)