mirror of
				https://github.com/MariaDB/server.git
				synced 2025-10-31 15:50:51 +03:00 
			
		
		
		
	Changed assembler functions to not access global variables or variables in text segment Added wrapper function in C to longlong2str() to pass _dig_vec_upper as an argument mysql-test/r/bigint.result: More tests for parsing of bigint's More tests for different values to conv() mysql-test/t/bigint.test: More tests for parsing of bigint's More tests for different values to conv() strings/Makefile.am: Added longlong2str_asm.c strings/longlong2str-x86.s: Changed functions to not access variables in text segment Fixed this by adding global variable '_dig_vec_upper' as an argument to longlong2str_with_dig_vector() strings/my_strtoll10-x86.s: Removed array lfactor by calculating the value in code (this is to make the code position independent) strings/longlong2str_asm.c: New BitKeeper file ``strings/longlong2str_asm.c''
		
			
				
	
	
		
			111 lines
		
	
	
		
			4.0 KiB
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
			
		
		
	
	
			111 lines
		
	
	
		
			4.0 KiB
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
#
# Initialize: drop any leftover test table, silencing the "unknown table"
# warning so a clean first run and a rerun behave the same.
#

--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Test of reading of bigint values
#
# Covers: leading zeros, signed/unsigned 32-bit boundaries, the full
# 64-bit signed range, out-of-range literals (fall back to decimal),
# unary +/-, round() on huge values, powers-of-ten up to 20 digits,
# and conv() across bit widths from 2^2 up to 2^63.
select 0,256,00000000000000065536,2147483647,-2147483648,2147483648,+4294967296;
select 9223372036854775807,-009223372036854775808;
select +9999999999999999999,-9999999999999999999;
select cast(9223372036854775808 as unsigned)+1;
select 9223372036854775808+1;
select -(0-3),round(-(0-3)), round(9999999999999999999);
select 1,11,101,1001,10001,100001,1000001,10000001,100000001,1000000001,10000000001,100000000001,1000000000001,10000000000001,100000000000001,1000000000000001,10000000000000001,100000000000000001,1000000000000000001,10000000000000000001;
select -1,-11,-101,-1001,-10001,-100001,-1000001,-10000001,-100000001,-1000000001,-10000000001,-100000000001,-1000000000001,-10000000000001,-100000000000001,-1000000000000001,-10000000000000001,-100000000000000001,-1000000000000000001,-10000000000000000001;
select conv(1,10,16),conv((1<<2)-1,10,16),conv((1<<10)-2,10,16),conv((1<<16)-3,10,16),conv((1<<25)-4,10,16),conv((1<<31)-5,10,16),conv((1<<36)-6,10,16),conv((1<<47)-7,10,16),conv((1<<48)-8,10,16),conv((1<<55)-9,10,16),conv((1<<56)-10,10,16),conv((1<<63)-11,10,16);
#
# In 3.23 we have to disable the test of column to bigint as
# this fails on AIX powerpc (the resolution for double is not good enough)
# This will work on 4.0 as we then have internal handling of bigint variables.
#

# Store values near the top of the unsigned 64-bit range (including one
# given in hex) and check lookup/delete by the maximum value.
create table t1 (a bigint unsigned not null, primary key(a));
insert into t1 values (18446744073709551615), (0xFFFFFFFFFFFFFFFE), (18446744073709551613), (18446744073709551612);
select * from t1;
select * from t1 where a=18446744073709551615;
# Disabled: string-to-bigint comparison (see AIX note above).
# select * from t1 where a='18446744073709551615';
delete from t1 where a=18446744073709551615;
select * from t1;
drop table t1;
# min()/max() and arithmetic on a bigint column, repeated after each
# schema change: signed nullable -> unsigned not null -> indexed ->
# signed not null. Both plain and grouped aggregation are checked each
# time so the key-based min/max optimization path is also exercised.
create table t1 ( a int not null default 1, big bigint );
insert into t1 (big) values (-1),(12345678901234567),(9223372036854775807),(18446744073709551615);
select min(big),max(big),max(big)-1 from t1;
select min(big),max(big),max(big)-1 from t1 group by a;
alter table t1 modify big bigint unsigned not null;
select min(big),max(big),max(big)-1 from t1;
select min(big),max(big),max(big)-1 from t1 group by a;
alter table t1 add key (big);
select min(big),max(big),max(big)-1 from t1;
select min(big),max(big),max(big)-1 from t1 group by a;
alter table t1 modify big bigint not null;
select min(big),max(big),max(big)-1 from t1;
select min(big),max(big),max(big)-1 from t1 group by a;
drop table t1;
#
# Test problem with big values for auto_increment
#

# Start auto_increment beyond 32-bit range and also use the same big
# number as a LIMIT argument.
create table t1 (id bigint auto_increment primary key, a int) auto_increment=9999999999;
insert into t1 values (null,1);
select * from t1;
select * from t1 limit 9999999999;
drop table t1;
#
# Item_uint::save_to_field()
# BUG#1845
# This can't be fixed in MySQL 4.0 without losing precision for bigints
#

# The same 20-digit value inserted as an integer literal, a float
# literal, and a string must all arrive in the decimal column intact.
CREATE TABLE t1 ( quantity decimal(60,0));
insert into t1 values (10000000000000000000);
insert into t1 values (10000000000000000000.0);
insert into t1 values ('10000000000000000000');
select * from t1;
drop table t1;
# atof() behaviour differs between systems. To be fixed in 4.1.
SELECT '0x8000000000000001'+0;
# Test for BUG#8562: joins over BIGINT UNSIGNED value + constant propagation
#
# Two tables share a composite (value64, value32) primary key; one
# value64 is above the signed 64-bit range, the other exactly at
# 9223372036854775807. Each join is run twice: once with the constant
# repeated on both sides, once relying on constant propagation through
# t2.value64 = t1.value64.
create table t1 (
 value64  bigint unsigned  not null,
 value32  integer          not null,
 primary key(value64, value32)
);

create table t2 (
 value64  bigint unsigned  not null,
 value32  integer          not null,
 primary key(value64, value32)
);

insert into t1 values(17156792991891826145, 1);
insert into t1 values( 9223372036854775807, 2);
insert into t2 values(17156792991891826145, 3);
insert into t2 values( 9223372036854775807, 4);

select * from t1;
select * from t2;

select * from t1, t2 where t1.value64=17156792991891826145 and
t2.value64=17156792991891826145;
select * from t1, t2 where t1.value64=17156792991891826145 and
t2.value64=t1.value64;

select * from t1, t2 where t1.value64= 9223372036854775807 and
t2.value64=9223372036854775807;
select * from t1, t2 where t1.value64= 9223372036854775807 and
t2.value64=t1.value64;

drop table t1, t2;

# End of 4.1 tests