Merge bk-internal.mysql.com:/home/bk/mysql-5.0
into mysql.com:/home/my/mysql-5.0
@@ -1109,3 +1109,4 @@ vio/viotest-ssl
 VC++Files/client/mysql_amd64.dsp
 client/mysqltestmanager-pwgen
 client/mysqltestmanagerc
+tools/mysqltestmanager
@@ -645,6 +645,7 @@ int dyn_string_cmp(DYNAMIC_STRING* ds, const char* fname)
   {
     DBUG_PRINT("info",("Size differs: result size: %u file size: %u",
                        ds->length, stat_info.st_size));
+    DBUG_PRINT("info",("result: '%s'", ds->str));
     DBUG_RETURN(2);
   }
   if (!(tmp = (char*) my_malloc(stat_info.st_size + 1, MYF(MY_WME))))
@@ -3623,8 +3624,8 @@ static void init_var_hash(MYSQL *mysql)
   if (hash_init(&var_hash, charset_info,
                 1024, 0, 0, get_var_key, var_free, MYF(0)))
     die("Variable hash initialization failed");
-  if (opt_big_test)
-    my_hash_insert(&var_hash, (byte*) var_init(0,"BIG_TEST", 0, "1",0));
+  my_hash_insert(&var_hash, (byte*) var_init(0,"BIG_TEST", 0,
+                                             (opt_big_test) ? "1" : "0", 0));
   v= var_init(0,"MAX_TABLES", 0, (sizeof(ulong) == 4) ? "31" : "62",0);
   my_hash_insert(&var_hash, (byte*) v);
   v= var_init(0,"SERVER_VERSION", 0, mysql_get_server_info(mysql), 0);
@@ -3793,6 +3794,12 @@ int main(int argc, char **argv)
       if (q->query == q->query_buf)
         q->query += q->first_word_len + 1;
       display_result_vertically= (q->type==Q_QUERY_VERTICAL);
+      if (save_file[0])
+      {
+        strmov(q->record_file,save_file);
+        q->require_file=require_file;
+        save_file[0]=0;
+      }
       error|= run_query(&cur_con->mysql, q, QUERY_REAP|QUERY_SEND);
       display_result_vertically= old_display_result_vertically;
       break;
mysql-test/include/big_test.inc (new file, 4 lines)
@@ -0,0 +1,4 @@
+--require r/big_test.require
+disable_query_log;
+eval select $BIG_TEST as using_big_test;
+enable_query_log;
@@ -1179,7 +1179,7 @@ start_master()
           --default-character-set=$CHARACTER_SET \
           --tmpdir=$MYSQL_TMP_DIR \
           --language=$LANGUAGE \
-          --innodb_data_file_path=ibdata1:50M \
+          --innodb_data_file_path=ibdata1:128M:autoextend \
           --open-files-limit=1024 \
           $MASTER_40_ARGS \
           $SMALL_SERVER \
@@ -1200,7 +1200,7 @@ start_master()
           $USE_NDBCLUSTER \
           --tmpdir=$MYSQL_TMP_DIR \
           --language=$LANGUAGE \
-          --innodb_data_file_path=ibdata1:50M \
+          --innodb_data_file_path=ibdata1:128M:autoextend \
           $MASTER_40_ARGS \
           $SMALL_SERVER \
           $EXTRA_MASTER_OPT $EXTRA_MASTER_MYSQLD_OPT \
@@ -436,30 +436,30 @@ f 10
 g 10
 h 10
 i 10
-select sql_big_result v,count(t) from t1 group by v limit 10;
-v count(t)
+select sql_big_result trim(v),count(t) from t1 group by v limit 10;
+trim(v) count(t)
 a 1
 a 10
 b 10
 c 10
 d 10
 e 10
 f 10
 g 10
 h 10
 i 10
-select sql_big_result v,count(c) from t1 group by v limit 10;
-v count(c)
+select sql_big_result trim(v),count(c) from t1 group by v limit 10;
+trim(v) count(c)
 a 1
 a 10
 b 10
 c 10
 d 10
 e 10
 f 10
 g 10
 h 10
 i 10
 select c,count(*) from t1 group by c limit 10;
 c count(*)
 a 1
@@ -520,18 +520,18 @@ f 10
 g 10
 h 10
 i 10
-select sql_big_result t,count(t) from t1 group by t limit 10;
-t count(t)
+select sql_big_result trim(t),count(t) from t1 group by t limit 10;
+trim(t) count(t)
 a 1
 a 10
 b 10
 c 10
 d 10
 e 10
 f 10
 g 10
 h 10
 i 10
 drop table t1;
 create table t1 (a char(10), unique (a));
 insert into t1 values ('a');
mysql-test/r/innodb-big.result (new file, 34 lines)
@@ -0,0 +1,34 @@
+DROP TABLE IF EXISTS t1, t2, t3, t4;
+CREATE TABLE t1 (id INTEGER) ENGINE=MYISAM;
+CREATE TABLE t2 (id INTEGER primary key) ENGINE=INNODB;
+CREATE TABLE t3 (a char(32) primary key,id INTEGER) ENGINE=INNODB;
+CREATE TABLE t4 (a char(32) primary key,id INTEGER) ENGINE=MYISAM;
+INSERT INTO t1 (id) VALUES (1);
+INSERT INTO t1 SELECT id+1 FROM t1;
+INSERT INTO t1 SELECT id+2 FROM t1;
+INSERT INTO t1 SELECT id+4 FROM t1;
+INSERT INTO t1 SELECT id+8 FROM t1;
+INSERT INTO t1 SELECT id+16 FROM t1;
+INSERT INTO t1 SELECT id+32 FROM t1;
+INSERT INTO t1 SELECT id+64 FROM t1;
+INSERT INTO t1 SELECT id+128 FROM t1;
+INSERT INTO t1 SELECT id+256 FROM t1;
+INSERT INTO t1 SELECT id+512 FROM t1;
+INSERT INTO t1 SELECT id+1024 FROM t1;
+INSERT INTO t1 SELECT id+2048 FROM t1;
+INSERT INTO t1 SELECT id+4096 FROM t1;
+INSERT INTO t1 SELECT id+8192 FROM t1;
+INSERT INTO t1 SELECT id+16384 FROM t1;
+INSERT INTO t1 SELECT id+32768 FROM t1;
+INSERT INTO t1 SELECT id+65536 FROM t1;
+INSERT INTO t1 SELECT id+131072 FROM t1;
+INSERT INTO t1 SELECT id+262144 FROM t1;
+INSERT INTO t1 SELECT id+524288 FROM t1;
+INSERT INTO t1 SELECT id+1048576 FROM t1;
+INSERT INTO t2 SELECT * FROM t1;
+INSERT INTO t3 SELECT concat(id),id from t2 ORDER BY -id;
+INSERT INTO t4 SELECT * from t3 ORDER BY concat(a);
+select sum(id) from t3;
+sum(id)
+2199024304128
+drop table t1,t2,t3,t4;
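A quick sanity check on the recorded total: the 21 doubling inserts leave t1 (and therefore t3) with every id from 1 to 2^21 = 2097152 exactly once, so

  sum(id) = 2^21 * (2^21 + 1) / 2 = 1048576 * 2097153 = 2199024304128

which matches the result line above.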
mysql-test/r/sum_distinct-big.result (new file, 108 lines)
@@ -0,0 +1,108 @@
+using_big_test
+0
+CREATE TABLE t1 (id INTEGER);
+CREATE TABLE t2 (id INTEGER);
+INSERT INTO t1 (id) VALUES (1), (1), (1),(1);
+INSERT INTO t1 (id) SELECT id FROM t1;
+/* 8 */
+INSERT INTO t1 (id) SELECT id FROM t1;
+/* 12 */
+INSERT INTO t1 (id) SELECT id FROM t1;
+/* 16 */
+INSERT INTO t1 (id) SELECT id FROM t1;
+/* 20 */
+INSERT INTO t1 (id) SELECT id FROM t1;
+/* 24 */
+INSERT INTO t1 SELECT id+1 FROM t1;
+INSERT INTO t1 SELECT id+2 FROM t1;
+INSERT INTO t1 SELECT id+4 FROM t1;
+INSERT INTO t1 SELECT id+8 FROM t1;
+INSERT INTO t1 SELECT id+16 FROM t1;
+INSERT INTO t1 SELECT id+32 FROM t1;
+INSERT INTO t1 SELECT id+64 FROM t1;
+INSERT INTO t1 SELECT id+128 FROM t1;
+INSERT INTO t1 SELECT id+256 FROM t1;
+INSERT INTO t1 SELECT id+512 FROM t1;
+SELECT AVG(DISTINCT id) FROM t1 GROUP BY id % 13;
+AVG(DISTINCT id)
+513.5000
+508.0000
+509.0000
+510.0000
+511.0000
+512.0000
+513.0000
+514.0000
+515.0000
+516.0000
+517.0000
+511.5000
+512.5000
+SELECT SUM(DISTINCT id)/COUNT(DISTINCT id) FROM t1 GROUP BY id % 13;
+SUM(DISTINCT id)/COUNT(DISTINCT id)
+513.50000
+508.00000
+509.00000
+510.00000
+511.00000
+512.00000
+513.00000
+514.00000
+515.00000
+516.00000
+517.00000
+511.50000
+512.50000
+INSERT INTO t1 SELECT id+1024 FROM t1;
+INSERT INTO t1 SELECT id+2048 FROM t1;
+INSERT INTO t1 SELECT id+4096 FROM t1;
+INSERT INTO t1 SELECT id+8192 FROM t1;
+INSERT INTO t2 SELECT id FROM t1 ORDER BY id*rand();
+SELECT SUM(DISTINCT id) sm FROM t1;
+sm
+134225920
+SELECT SUM(DISTINCT id) sm FROM t2;
+sm
+134225920
+SELECT SUM(DISTINCT id) sm FROM t1 group by id % 13;
+sm
+10327590
+10328851
+10330112
+10331373
+10332634
+10317510
+10318770
+10320030
+10321290
+10322550
+10323810
+10325070
+10326330
+SET max_heap_table_size=16384;
+SHOW variables LIKE 'max_heap_table_size';
+Variable_name Value
+max_heap_table_size 16384
+SELECT SUM(DISTINCT id) sm FROM t1;
+sm
+134225920
+SELECT SUM(DISTINCT id) sm FROM t2;
+sm
+134225920
+SELECT SUM(DISTINCT id) sm FROM t1 GROUP BY id % 13;
+sm
+10327590
+10328851
+10330112
+10331373
+10332634
+10317510
+10318770
+10320030
+10321290
+10322550
+10323810
+10325070
+10326330
+DROP TABLE t1;
+DROP TABLE t2;
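The recorded sums can likewise be checked by hand: after the id+1024 through id+8192 inserts, the distinct ids in t1 (and in t2, which is a randomly reordered copy) are exactly 1..16384, so

  SUM(DISTINCT id) = 16384 * 16385 / 2 = 134225920

which is the value shown for both tables, both before and after max_heap_table_size is lowered to 16384.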
@@ -95,109 +95,3 @@ SELECT SUM(DISTINCT id % 11) FROM t1;
 SUM(DISTINCT id % 11)
 55
 DROP TABLE t1;
-CREATE TABLE t1 (id INTEGER);
-CREATE TABLE t2 (id INTEGER);
-INSERT INTO t1 (id) VALUES (1), (1), (1),(1);
-INSERT INTO t1 (id) SELECT id FROM t1;
-/* 8 */
-INSERT INTO t1 (id) SELECT id FROM t1;
-/* 12 */
-INSERT INTO t1 (id) SELECT id FROM t1;
-/* 16 */
-INSERT INTO t1 (id) SELECT id FROM t1;
-/* 20 */
-INSERT INTO t1 (id) SELECT id FROM t1;
-/* 24 */
-INSERT INTO t1 SELECT id+1 FROM t1;
-INSERT INTO t1 SELECT id+2 FROM t1;
-INSERT INTO t1 SELECT id+4 FROM t1;
-INSERT INTO t1 SELECT id+8 FROM t1;
-INSERT INTO t1 SELECT id+16 FROM t1;
-INSERT INTO t1 SELECT id+32 FROM t1;
-INSERT INTO t1 SELECT id+64 FROM t1;
-INSERT INTO t1 SELECT id+128 FROM t1;
-INSERT INTO t1 SELECT id+256 FROM t1;
-INSERT INTO t1 SELECT id+512 FROM t1;
-SELECT AVG(DISTINCT id) FROM t1 GROUP BY id % 13;
-AVG(DISTINCT id)
-513.5000
-508.0000
-509.0000
-510.0000
-511.0000
-512.0000
-513.0000
-514.0000
-515.0000
-516.0000
-517.0000
-511.5000
-512.5000
-SELECT SUM(DISTINCT id)/COUNT(DISTINCT id) FROM t1 GROUP BY id % 13;
-SUM(DISTINCT id)/COUNT(DISTINCT id)
-513.50000
-508.00000
-509.00000
-510.00000
-511.00000
-512.00000
-513.00000
-514.00000
-515.00000
-516.00000
-517.00000
-511.50000
-512.50000
-INSERT INTO t1 SELECT id+1024 FROM t1;
-INSERT INTO t1 SELECT id+2048 FROM t1;
-INSERT INTO t1 SELECT id+4096 FROM t1;
-INSERT INTO t1 SELECT id+8192 FROM t1;
-INSERT INTO t2 SELECT id FROM t1 ORDER BY id*rand();
-SELECT SUM(DISTINCT id) sm FROM t1;
-sm
-134225920
-SELECT SUM(DISTINCT id) sm FROM t2;
-sm
-134225920
-SELECT SUM(DISTINCT id) sm FROM t1 group by id % 13;
-sm
-10327590
-10328851
-10330112
-10331373
-10332634
-10317510
-10318770
-10320030
-10321290
-10322550
-10323810
-10325070
-10326330
-SET max_heap_table_size=16384;
-SHOW variables LIKE 'max_heap_table_size';
-Variable_name Value
-max_heap_table_size 16384
-SELECT SUM(DISTINCT id) sm FROM t1;
-sm
-134225920
-SELECT SUM(DISTINCT id) sm FROM t2;
-sm
-134225920
-SELECT SUM(DISTINCT id) sm FROM t1 GROUP BY id % 13;
-sm
-10327590
-10328851
-10330112
-10331373
-10332634
-10317510
-10318770
-10320030
-10321290
-10322550
-10323810
-10325070
-10326330
-DROP TABLE t1;
-DROP TABLE t2;
@@ -280,14 +280,14 @@ explain select * from t1 where v='a';
 select v,count(*) from t1 group by v limit 10;
 select v,count(t) from t1 group by v limit 10;
 select v,count(c) from t1 group by v limit 10;
-select sql_big_result v,count(t) from t1 group by v limit 10;
-select sql_big_result v,count(c) from t1 group by v limit 10;
+select sql_big_result trim(v),count(t) from t1 group by v limit 10;
+select sql_big_result trim(v),count(c) from t1 group by v limit 10;
 select c,count(*) from t1 group by c limit 10;
 select c,count(t) from t1 group by c limit 10;
 select sql_big_result c,count(t) from t1 group by c limit 10;
 select t,count(*) from t1 group by t limit 10;
 select t,count(t) from t1 group by t limit 10;
-select sql_big_result t,count(t) from t1 group by t limit 10;
+select sql_big_result trim(t),count(t) from t1 group by t limit 10;
 drop table t1;
 
 #
mysql-test/t/innodb-big.test (new file, 46 lines)
@@ -0,0 +1,46 @@
+#
+# Test some things that takes a long time
+
+-- source include/big_test.inc
+-- source include/have_innodb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2, t3, t4;
+--enable_warnings
+
+#
+# Test test how filesort and buffered-record-reads works with innodb
+#
+
+CREATE TABLE t1 (id INTEGER) ENGINE=MYISAM;
+CREATE TABLE t2 (id INTEGER primary key) ENGINE=INNODB;
+CREATE TABLE t3 (a char(32) primary key,id INTEGER) ENGINE=INNODB;
+CREATE TABLE t4 (a char(32) primary key,id INTEGER) ENGINE=MYISAM;
+
+INSERT INTO t1 (id) VALUES (1);
+INSERT INTO t1 SELECT id+1 FROM t1;
+INSERT INTO t1 SELECT id+2 FROM t1;
+INSERT INTO t1 SELECT id+4 FROM t1;
+INSERT INTO t1 SELECT id+8 FROM t1;
+INSERT INTO t1 SELECT id+16 FROM t1;
+INSERT INTO t1 SELECT id+32 FROM t1;
+INSERT INTO t1 SELECT id+64 FROM t1;
+INSERT INTO t1 SELECT id+128 FROM t1;
+INSERT INTO t1 SELECT id+256 FROM t1;
+INSERT INTO t1 SELECT id+512 FROM t1;
+INSERT INTO t1 SELECT id+1024 FROM t1;
+INSERT INTO t1 SELECT id+2048 FROM t1;
+INSERT INTO t1 SELECT id+4096 FROM t1;
+INSERT INTO t1 SELECT id+8192 FROM t1;
+INSERT INTO t1 SELECT id+16384 FROM t1;
+INSERT INTO t1 SELECT id+32768 FROM t1;
+INSERT INTO t1 SELECT id+65536 FROM t1;
+INSERT INTO t1 SELECT id+131072 FROM t1;
+INSERT INTO t1 SELECT id+262144 FROM t1;
+INSERT INTO t1 SELECT id+524288 FROM t1;
+INSERT INTO t1 SELECT id+1048576 FROM t1;
+INSERT INTO t2 SELECT * FROM t1;
+INSERT INTO t3 SELECT concat(id),id from t2 ORDER BY -id;
+INSERT INTO t4 SELECT * from t3 ORDER BY concat(a);
+select sum(id) from t3;
+drop table t1,t2,t3,t4;
mysql-test/t/sum_distinct-big.test (new file, 67 lines)
@@ -0,0 +1,67 @@
+#
+# Various tests for SUM(DISTINCT ...)
+#
+
+--source include/big_test.inc
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+#
+# Test the case when distinct values doesn't fit in memory and
+# filesort is used (see uniques.cc:merge_walk)
+#
+
+CREATE TABLE t1 (id INTEGER);
+CREATE TABLE t2 (id INTEGER);
+
+INSERT INTO t1 (id) VALUES (1), (1), (1),(1);
+INSERT INTO t1 (id) SELECT id FROM t1; /* 8 */
+INSERT INTO t1 (id) SELECT id FROM t1; /* 12 */
+INSERT INTO t1 (id) SELECT id FROM t1; /* 16 */
+INSERT INTO t1 (id) SELECT id FROM t1; /* 20 */
+INSERT INTO t1 (id) SELECT id FROM t1; /* 24 */
+INSERT INTO t1 SELECT id+1 FROM t1;
+INSERT INTO t1 SELECT id+2 FROM t1;
+INSERT INTO t1 SELECT id+4 FROM t1;
+INSERT INTO t1 SELECT id+8 FROM t1;
+INSERT INTO t1 SELECT id+16 FROM t1;
+INSERT INTO t1 SELECT id+32 FROM t1;
+INSERT INTO t1 SELECT id+64 FROM t1;
+INSERT INTO t1 SELECT id+128 FROM t1;
+INSERT INTO t1 SELECT id+256 FROM t1;
+INSERT INTO t1 SELECT id+512 FROM t1;
+
+# Just test that AVG(DISTINCT) is there
+SELECT AVG(DISTINCT id) FROM t1 GROUP BY id % 13;
+SELECT SUM(DISTINCT id)/COUNT(DISTINCT id) FROM t1 GROUP BY id % 13;
+
+INSERT INTO t1 SELECT id+1024 FROM t1;
+INSERT INTO t1 SELECT id+2048 FROM t1;
+INSERT INTO t1 SELECT id+4096 FROM t1;
+INSERT INTO t1 SELECT id+8192 FROM t1;
+INSERT INTO t2 SELECT id FROM t1 ORDER BY id*rand();
+
+# SELECT '++++++++++++++++++++++++++++++++++++++++++++++++++';
+
+SELECT SUM(DISTINCT id) sm FROM t1;
+SELECT SUM(DISTINCT id) sm FROM t2;
+SELECT SUM(DISTINCT id) sm FROM t1 group by id % 13;
+
+# this limit for max_heap_table_size is set to force testing the case, when
+# all distinct sum values can not fit in memory and must be stored in a
+# temporary table
+
+SET max_heap_table_size=16384;
+
+# to check that max_heap_table_size was actually set (hard limit for minimum
+# max_heap_table_size is set in mysqld.cc):
+
+SHOW variables LIKE 'max_heap_table_size';
+
+SELECT SUM(DISTINCT id) sm FROM t1;
+SELECT SUM(DISTINCT id) sm FROM t2;
+SELECT SUM(DISTINCT id) sm FROM t1 GROUP BY id % 13;
+
+DROP TABLE t1;
+DROP TABLE t2;
@@ -93,62 +93,3 @@ SELECT SUM(DISTINCT id) FROM t1;
 SELECT SUM(DISTINCT id % 11) FROM t1;
 
 DROP TABLE t1;
-
-#
-# Test the case when distinct values doesn't fit in memory and
-# filesort is used (see uniques.cc:merge_walk)
-#
-
-CREATE TABLE t1 (id INTEGER);
-CREATE TABLE t2 (id INTEGER);
-
-INSERT INTO t1 (id) VALUES (1), (1), (1),(1);
-INSERT INTO t1 (id) SELECT id FROM t1; /* 8 */
-INSERT INTO t1 (id) SELECT id FROM t1; /* 12 */
-INSERT INTO t1 (id) SELECT id FROM t1; /* 16 */
-INSERT INTO t1 (id) SELECT id FROM t1; /* 20 */
-INSERT INTO t1 (id) SELECT id FROM t1; /* 24 */
-INSERT INTO t1 SELECT id+1 FROM t1;
-INSERT INTO t1 SELECT id+2 FROM t1;
-INSERT INTO t1 SELECT id+4 FROM t1;
-INSERT INTO t1 SELECT id+8 FROM t1;
-INSERT INTO t1 SELECT id+16 FROM t1;
-INSERT INTO t1 SELECT id+32 FROM t1;
-INSERT INTO t1 SELECT id+64 FROM t1;
-INSERT INTO t1 SELECT id+128 FROM t1;
-INSERT INTO t1 SELECT id+256 FROM t1;
-INSERT INTO t1 SELECT id+512 FROM t1;
-
-# Just test that AVG(DISTINCT) is there
-SELECT AVG(DISTINCT id) FROM t1 GROUP BY id % 13;
-SELECT SUM(DISTINCT id)/COUNT(DISTINCT id) FROM t1 GROUP BY id % 13;
-
-INSERT INTO t1 SELECT id+1024 FROM t1;
-INSERT INTO t1 SELECT id+2048 FROM t1;
-INSERT INTO t1 SELECT id+4096 FROM t1;
-INSERT INTO t1 SELECT id+8192 FROM t1;
-INSERT INTO t2 SELECT id FROM t1 ORDER BY id*rand();
-
-# SELECT '++++++++++++++++++++++++++++++++++++++++++++++++++';
-
-SELECT SUM(DISTINCT id) sm FROM t1;
-SELECT SUM(DISTINCT id) sm FROM t2;
-SELECT SUM(DISTINCT id) sm FROM t1 group by id % 13;
-
-# this limit for max_heap_table_size is set to force testing the case, when
-# all distinct sum values can not fit in memory and must be stored in a
-# temporary table
-
-SET max_heap_table_size=16384;
-
-# to check that max_heap_table_size was actually set (hard limit for minimum
-# max_heap_table_size is set in mysqld.cc):
-
-SHOW variables LIKE 'max_heap_table_size';
-
-SELECT SUM(DISTINCT id) sm FROM t1;
-SELECT SUM(DISTINCT id) sm FROM t2;
-SELECT SUM(DISTINCT id) sm FROM t1 GROUP BY id % 13;
-
-DROP TABLE t1;
-DROP TABLE t2;
@@ -127,7 +127,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
   param.ref_length= table->file->ref_length;
   param.addon_field= 0;
   param.addon_length= 0;
-  if (!(table->s->tmp_table || table->fulltext_searched))
+  if (!(table->file->table_flags() & HA_FAST_KEY_READ) &&
+      !table->fulltext_searched)
   {
     /*
       Get the descriptors of all fields whose values are appended
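The condition rewritten in this hunk decides whether filesort appends ("addon") column values to the sort records instead of sorting key plus row position and re-reading rows afterwards. A minimal standalone sketch of that decision, with invented stand-in types (this is not the server code, only an illustration of the new test on HA_FAST_KEY_READ):

#include <cstdint>
#include <cstdio>

constexpr uint32_t HA_FAST_KEY_READ= 1 << 5;   // same bit value as in handler.h

struct TableLike                               // invented stand-in for TABLE/handler
{
  uint32_t table_flags;
  bool fulltext_searched;
};

/*
  Mirrors the new filesort() condition:
    !(table->file->table_flags() & HA_FAST_KEY_READ) && !table->fulltext_searched
  true  -> sort <key, packed column values>; no random row re-reads after sorting
  false -> sort <key, row position> and fetch rows by position afterwards
*/
static bool use_addon_fields(const TableLike &t)
{
  return !(t.table_flags & HA_FAST_KEY_READ) && !t.fulltext_searched;
}

int main()
{
  TableLike innodb_like= {0, false};                // this patch removes the flag for InnoDB
  TableLike heap_like= {HA_FAST_KEY_READ, false};   // random key reads stay cheap
  std::printf("innodb-like table packs addon fields: %d\n", use_addon_fields(innodb_like));
  std::printf("heap-like table packs addon fields:   %d\n", use_addon_fields(heap_like));
  return 0;
}

With the HA_FAST_KEY_READ bit dropped from ha_innodb.h later in this diff, InnoDB tables now take the addon-field path.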
@@ -1301,27 +1302,29 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
   uint length= 0;
   uint fields= 0;
   uint null_fields= 0;
+  query_id_t query_id= thd->query_id;
   /*
     If there is a reference to a field in the query add it
     to the the set of appended fields.
     Note for future refinement:
     This this a too strong condition.
     Actually we need only the fields referred in the
     result set. And for some of them it makes sense to use
     the values directly from sorted fields.
   */
   *plength= 0;
 
   /*
     The following statement is added to avoid sorting in alter_table.
     The fact is the filter 'field->query_id != thd->query_id'
     doesn't work for alter table
   */
-  if (thd->lex->sql_command != SQLCOM_SELECT)
+  if (thd->lex->sql_command != SQLCOM_SELECT &&
+      thd->lex->sql_command != SQLCOM_INSERT_SELECT)
     return 0;
   for (pfield= ptabfield; (field= *pfield) ; pfield++)
   {
-    if (field->query_id != thd->query_id)
+    if (field->query_id != query_id)
       continue;
     if (field->flags & BLOB_FLAG)
       return 0;
@@ -85,7 +85,6 @@ class ha_innobase: public handler
   ha_innobase(TABLE *table): handler(table),
     int_table_flags(HA_REC_NOT_IN_SEQ |
                     HA_NULL_IN_KEY |
-                    HA_FAST_KEY_READ |
                     HA_CAN_INDEX_BLOBS |
                     HA_CAN_SQL_HANDLER |
                     HA_NOT_EXACT_COUNT |
@@ -47,14 +47,22 @@
 #define HA_ADMIN_WRONG_CHECKSUM -8
 
 /* Bits in table_flags() to show what database can do */
-#define HA_READ_RND_SAME (1 << 0) /* can switch index during the scan
-                                     with ::rnd_same() - not used yet.
-                                     see mi_rsame/heap_rsame/myrg_rsame */
+/*
+  Can switch index during the scan with ::rnd_same() - not used yet.
+  see mi_rsame/heap_rsame/myrg_rsame
+*/
+#define HA_READ_RND_SAME (1 << 0)
 #define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */
 #define HA_REC_NOT_IN_SEQ (1 << 3) /* ha_info don't return recnumber;
                                       It returns a position to ha_r_rnd */
 #define HA_CAN_GEOMETRY (1 << 4)
-#define HA_FAST_KEY_READ (1 << 5) /* no need for a record cache in filesort */
+/*
+  Reading keys in random order is as fast as reading keys in sort order
+  (Used in records.cc to decide if we should use a record cache and by
+   filesort to decide if we should sort key + data or key + pointer-to-row
+*/
+#define HA_FAST_KEY_READ (1 << 5)
 #define HA_NULL_IN_KEY (1 << 7) /* One can have keys with NULL */
 #define HA_DUPP_POS (1 << 8) /* ha_position() gives dup row */
 #define HA_NO_BLOBS (1 << 9) /* Doesn't support blobs */
@@ -62,9 +70,11 @@
 #define HA_AUTO_PART_KEY (1 << 11) /* auto-increment in multi-part key */
 #define HA_REQUIRE_PRIMARY_KEY (1 << 12) /* .. and can't create a hidden one */
 #define HA_NOT_EXACT_COUNT (1 << 13)
-#define HA_CAN_INSERT_DELAYED (1 << 14) /* only handlers with table-level locks
-                                           need no special code to support
-                                           INSERT DELAYED */
+/*
+  INSERT_DELAYED only works with handlers that uses MySQL internal table
+  level locks
+*/
+#define HA_CAN_INSERT_DELAYED (1 << 14)
 #define HA_PRIMARY_KEY_IN_READ_INDEX (1 << 15)
 #define HA_NOT_DELETE_WITH_CACHE (1 << 18)
 #define HA_NO_PREFIX_CHAR_KEYS (1 << 20)
@@ -116,7 +116,7 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset;
   The following parameters is to decide when to use an extra cache to
   optimise seeks when reading a big table in sorted order
 */
-#define MIN_FILE_LENGTH_TO_USE_ROW_CACHE (16L*1024*1024)
+#define MIN_FILE_LENGTH_TO_USE_ROW_CACHE (10L*1024*1024)
 #define MIN_ROWS_TO_USE_TABLE_CACHE 100
 #define MIN_ROWS_TO_USE_BULK_INSERT 100
 
@@ -89,7 +89,8 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
       (ulonglong) MIN_FILE_LENGTH_TO_USE_ROW_CACHE &&
       info->io_cache->end_of_file/info->ref_length * table->s->reclength >
       (my_off_t) MIN_ROWS_TO_USE_TABLE_CACHE &&
-      !table->s->blob_fields)
+      !table->s->blob_fields &&
+      info->ref_length <= MAX_REFLENGTH)
   {
     if (! init_rr_cache(thd, info))
     {
@@ -348,7 +349,8 @@ static int init_rr_cache(THD *thd, READ_RECORD *info)
                        MYF(0))))
     DBUG_RETURN(1);
 #ifdef HAVE_purify
-  bzero(info->cache,rec_cache_size);	// Avoid warnings in qsort
+  // Avoid warnings in qsort
+  bzero(info->cache,rec_cache_size+info->cache_records* info->struct_length+1);
 #endif
   DBUG_PRINT("info",("Allocated buffert for %d records",info->cache_records));
   info->read_positions=info->cache+rec_cache_size;
@@ -8157,6 +8157,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
         */
         (*argp)->maybe_null=1;
       }
+      new_field->query_id= thd->query_id;
     }
   }
 }
@@ -8203,6 +8204,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
         group_null_items++;
         new_field->flags|= GROUP_FLAG;
       }
+      new_field->query_id= thd->query_id;
       *(reg_field++) =new_field;
     }
     if (!--hidden_field_count)
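These two create_tmp_table() hunks go together with the get_addon_fields() change above: that function now compares each field's query_id against a cached copy of thd->query_id and skips fields that do not match, so fields materialised for an internal temporary table are presumably stamped with the current query id here so the filter does not pass over them when the temporary table is later sorted. A toy illustration of that filter (the type and function names are invented, not the server API):

#include <cstdio>
#include <vector>

typedef unsigned long query_id_t;

struct FieldLike                 // invented stand-in for Field
{
  const char *name;
  query_id_t query_id;           // query that created/referenced the field
};

// Keep only the fields that belong to the current query, the way
// get_addon_fields() does after caching thd->query_id in a local.
static size_t count_addon_candidates(const std::vector<FieldLike> &fields,
                                     query_id_t query_id)
{
  size_t n= 0;
  for (size_t i= 0; i < fields.size(); i++)
    if (fields[i].query_id == query_id)   // field->query_id != query_id -> skipped
      n++;
  return n;
}

int main()
{
  const query_id_t current= 42;
  std::vector<FieldLike> stamped;          // fields given new_field->query_id= thd->query_id
  stamped.push_back({"a", current});
  stamped.push_back({"b", current});
  std::vector<FieldLike> unstamped;        // fields as they were before this patch
  unstamped.push_back({"a", 0});
  unstamped.push_back({"b", 0});
  std::printf("stamped fields kept:   %zu\n", count_addon_candidates(stamped, current));
  std::printf("unstamped fields kept: %zu\n", count_addon_candidates(unstamped, current));
  return 0;
}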