mirror of
				https://sourceware.org/git/glibc.git
				synced 2025-11-03 20:53:13 +03:00 
			
		
		
		
	x86-64: Optimize L(between_2_3) in memcmp-avx2-movbe.S
Turn

	movzbl	-1(%rdi, %rdx), %edi
	movzbl	-1(%rsi, %rdx), %esi
	orl	%edi, %eax
	orl	%esi, %ecx

into

	movb	-1(%rdi, %rdx), %al
	movb	-1(%rsi, %rdx), %cl

* sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S (between_2_3): Replace movzbl and orl with movb.
This commit is contained in:
		@@ -1,3 +1,8 @@
 | 
			
		||||
2017-06-23  H.J. Lu  <hongjiu.lu@intel.com>
 | 
			
		||||
 | 
			
		||||
	* sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S (between_2_3):
 | 
			
		||||
	Replace movzbl and orl with movb.
 | 
			
		||||
 | 
			
		||||
2017-06-23  Gabriel F. T. Gomes  <gftg@linux.vnet.ibm.com>
 | 
			
		||||
 | 
			
		||||
	* manual/arith.texi (Infinity and NaN): Document SNANFN and SNANFNx.
 | 
			
		||||
 
 | 
			
		||||
@@ -144,10 +144,8 @@ L(between_2_3):
 | 
			
		||||
	shll	$8, %ecx
 | 
			
		||||
	bswap	%eax
 | 
			
		||||
	bswap	%ecx
 | 
			
		||||
	movzbl	-1(%rdi, %rdx), %edi
 | 
			
		||||
	movzbl	-1(%rsi, %rdx), %esi
 | 
			
		||||
	orl	%edi, %eax
 | 
			
		||||
	orl	%esi, %ecx
 | 
			
		||||
	movb	-1(%rdi, %rdx), %al
 | 
			
		||||
	movb	-1(%rsi, %rdx), %cl
 | 
			
		||||
	/* Subtraction is okay because the upper 8 bits are zero.  */
 | 
			
		||||
	subl	%ecx, %eax
 | 
			
		||||
	ret
 | 
			
		||||
 
 | 
			
		||||
		Reference in New Issue
	
	Block a user