
Merge bk-internal:/home/bk/mysql-4.0
into sergbook.mysql.com:/usr/home/serg/Abk/mysql-4.0
serg@sergbook.mysql.com
2003-04-25 15:45:45 +04:00
89 changed files with 593 additions and 367 deletions

View File

@ -1 +0,0 @@
logo_nusphere_b.tif

View File

@ -850,7 +850,13 @@ typedef char bool; /* Ordinary boolean values 0 1 */
((uint32) (uchar) (A)[0])))
#define sint4korr(A) (*((long *) (A)))
#define uint2korr(A) (*((uint16 *) (A)))
#ifdef HAVE_purify
#define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
(((uint32) ((uchar) (A)[1])) << 8) +\
(((uint32) ((uchar) (A)[2])) << 16))
#else
#define uint3korr(A) (long) (*((unsigned long *) (A)) & 0xFFFFFF)
#endif
#define uint4korr(A) (*((unsigned long *) (A)))
#define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
(((uint32) ((uchar) (A)[1])) << 8) +\

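For context on the uint3korr hunk above: the masking form dereferences four bytes and discards the top one, which memory checkers such as Purify flag as a read past the end of a 3-byte value, so the HAVE_purify branch assembles the three bytes explicitly instead. A minimal standalone sketch of that byte-wise little-endian decode, using a hypothetical macro name and made-up test values that are not part of the patch:

#include <stdio.h>

typedef unsigned int  uint32;
typedef unsigned char uchar;

/* Reads exactly the three bytes that belong to the value,
   least-significant byte first, like the HAVE_purify branch above. */
#define uint3korr_safe(A) ((uint32) (((uint32) ((uchar) (A)[0])) + \
                                     (((uint32) ((uchar) (A)[1])) << 8) + \
                                     (((uint32) ((uchar) (A)[2])) << 16)))

int main(void)
{
  uchar buf[3]= {0x01, 0x02, 0x03};      /* little-endian encoding of 0x030201 */
  printf("%u\n", uint3korr_safe(buf));   /* prints 197121 */
  return 0;
}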
View File

@ -2780,9 +2780,6 @@ loop:
mutex_exit(&kernel_mutex);
/* We run purge and a batch of ibuf_contract every 10 seconds, even
if the server were active: */
for (i = 0; i < 10; i++) {
n_ios_old = log_sys->n_log_ios + buf_pool->n_pages_read
+ buf_pool->n_pages_written;
@ -2805,6 +2802,11 @@ loop:
goto suspend_thread;
}
if (srv_fast_shutdown && srv_shutdown_state > 0) {
goto background_loop;
}
/* We flush the log once a second even if no commit
is issued or we have specified in my.cnf no flush
at transaction commit */
@ -2832,11 +2834,6 @@ loop:
log_flush_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP);
log_flush_to_disk();
}
if (srv_fast_shutdown && srv_shutdown_state > 0) {
goto background_loop;
}
if (srv_activity_count == old_activity_count) {
@ -2867,7 +2864,7 @@ loop:
if (n_pend_ios < 3 && (n_ios - n_ios_very_old < 200)) {
srv_main_thread_op_info = (char*) "flushing buffer pool pages";
buf_flush_batch(BUF_FLUSH_LIST, 50, ut_dulint_max);
buf_flush_batch(BUF_FLUSH_LIST, 100, ut_dulint_max);
srv_main_thread_op_info = (char*) "flushing log";
log_flush_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP);
@ -2926,7 +2923,13 @@ background_loop:
/* Flush a few oldest pages to make the checkpoint younger */
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 10, ut_dulint_max);
if (srv_fast_shutdown && srv_shutdown_state > 0) {
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100,
ut_dulint_max);
} else {
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 10,
ut_dulint_max);
}
srv_main_thread_op_info = (char*)"making checkpoint";
@ -2993,7 +2996,8 @@ background_loop:
}
mutex_exit(&kernel_mutex);
srv_main_thread_op_info = (char*) "waiting for buffer pool flush to end";
srv_main_thread_op_info =
(char*) "waiting for buffer pool flush to end";
buf_flush_wait_batch_end(BUF_FLUSH_LIST);
srv_main_thread_op_info = (char*)"making checkpoint";

View File

@ -14,7 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* Ger tillbaka en struct med information om isam-filen */
/* Return useful base information for an open table */
#include "myisamdef.h"
#ifdef __WIN__

View File

@ -27,6 +27,8 @@ PRIMARY KEY (userID)
INSERT INTO t2 VALUES (1,'name','pass','mail','Y','v','n','adr','1','1','1');
INSERT INTO t2 VALUES (2,'name','pass','mail','Y','v','n','adr','1','1','1');
INSERT INTO t2 VALUES (3,'name','pass','mail','Y','v','n','adr','1','1','1');
INSERT INTO t2 VALUES (4,'name','pass','mail','Y','v','n','adr','1','1','1');
INSERT INTO t2 VALUES (5,'name','pass','mail','Y','v','n','adr','1','1','1');
SELECT t2.userid, MIN(t1.score) FROM t1, t2 WHERE t1.userID=t2.userID GROUP BY t2.userid;
userid MIN(t1.score)
1 1
@ -47,8 +49,12 @@ userid MIN(t1.score+0.0)
2 2.0
SELECT t2.userid, MIN(t1.score+0.0) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid ORDER BY NULL;
userid MIN(t1.score+0.0)
1 1.0
2 2.0
1 1.0
EXPLAIN SELECT t2.userid, MIN(t1.score+0.0) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid ORDER BY NULL;
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 4 Using where; Using temporary
t2 eq_ref PRIMARY PRIMARY 4 t1.userID 1 Using index
drop table test.t1,test.t2;
CREATE TABLE t1 (
PID int(10) unsigned NOT NULL auto_increment,
@ -242,34 +248,32 @@ score smallint(5) unsigned,
key (spid),
key (score)
);
INSERT INTO t1 VALUES (1,1,1),(2,2,2),(2,1,1),(3,3,3),(4,3,3),(5,3,3);
INSERT INTO t1 VALUES (1,1,1),(2,2,2),(2,1,1),(3,3,3),(4,3,3),(5,3,3),(6,3,3),(7,3,3);
explain select userid,count(*) from t1 group by userid desc;
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
t1 ALL NULL NULL NULL NULL 8 Using temporary; Using filesort
explain select userid,count(*) from t1 group by userid desc order by null;
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 6 Using temporary
t1 ALL NULL NULL NULL NULL 8 Using temporary
select userid,count(*) from t1 group by userid desc;
userid count(*)
3 3
3 5
2 1
1 2
select userid,count(*) from t1 group by userid desc having (count(*)+1) IN (4,3);
userid count(*)
3 3
1 2
select userid,count(*) from t1 group by userid desc having 3 IN (1,COUNT(*));
userid count(*)
3 3
explain select spid,count(*) from t1 where spid between 1 and 2 group by spid desc;
table type possible_keys key key_len ref rows Extra
t1 range spID spID 5 NULL 2 Using where; Using index
t1 range spID spID 5 NULL 3 Using where; Using index
explain select spid,count(*) from t1 where spid between 1 and 2 group by spid;
table type possible_keys key key_len ref rows Extra
t1 range spID spID 5 NULL 2 Using where; Using index
t1 range spID spID 5 NULL 3 Using where; Using index
explain select spid,count(*) from t1 where spid between 1 and 2 group by spid order by null;
table type possible_keys key key_len ref rows Extra
t1 range spID spID 5 NULL 2 Using where; Using index
t1 range spID spID 5 NULL 3 Using where; Using index
select spid,count(*) from t1 where spid between 1 and 2 group by spid;
spid count(*)
1 1
@ -280,12 +284,14 @@ spid count(*)
1 1
explain select sql_big_result spid,sum(userid) from t1 group by spid desc;
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 6 Using filesort
t1 ALL NULL NULL NULL NULL 8 Using filesort
explain select sql_big_result spid,sum(userid) from t1 group by spid desc order by null;
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 6
t1 ALL NULL NULL NULL NULL 8
select sql_big_result spid,sum(userid) from t1 group by spid desc;
spid sum(userid)
7 3
6 3
5 3
4 3
3 3
@ -293,13 +299,13 @@ spid sum(userid)
1 1
explain select sql_big_result score,count(*) from t1 group by score desc;
table type possible_keys key key_len ref rows Extra
t1 index NULL score 3 NULL 6 Using index
t1 index NULL score 3 NULL 8 Using index
explain select sql_big_result score,count(*) from t1 group by score desc order by null;
table type possible_keys key key_len ref rows Extra
t1 index NULL score 3 NULL 6 Using index
t1 index NULL score 3 NULL 8 Using index
select sql_big_result score,count(*) from t1 group by score desc;
score count(*)
3 3
3 5
2 1
1 2
drop table t1;
@ -535,15 +541,7 @@ t1 ALL NULL NULL NULL NULL 6 Using temporary
t2 ALL a NULL NULL NULL 4 Using where
drop table t1,t2;
create table t1 (a int, b int);
insert into t1 values (1, 4);
insert into t1 values (10, 40);
insert into t1 values (1, 4);
insert into t1 values (10, 43);
insert into t1 values (1, 4);
insert into t1 values (10, 41);
insert into t1 values (1, 4);
insert into t1 values (10, 43);
insert into t1 values (1, 4);
insert into t1 values (1, 4),(10, 40),(1, 4),(10, 43),(1, 4),(10, 41),(1, 4),(10, 43),(1, 4);
select a, MAX(b), INTERVAL (MAX(b), 1,3,10,30,39,40,50,60,100,1000) from t1 group by a;
a MAX(b) INTERVAL (MAX(b), 1,3,10,30,39,40,50,60,100,1000)
1 4 2

View File

@ -1201,3 +1201,74 @@ a b
8 5
9 5
drop table t1,t2;
create table t1 (a int not null auto_increment primary key, b int, c int, key(c)) type=innodb;
create table t2 (a int not null auto_increment primary key, b int);
insert into t1 (b) values (null),(null),(null),(null),(null),(null),(null);
insert into t2 (a) select b from t1;
insert into t1 (b) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
select count(*) from t1;
count(*)
29267
explain select a from t1 where a between 1 and 10000;
table type possible_keys key key_len ref rows Extra
t1 range PRIMARY PRIMARY 4 NULL 14790 Using where; Using index
explain select * from t1 where a between 1 and 10000;
table type possible_keys key key_len ref rows Extra
t1 range PRIMARY PRIMARY 4 NULL 14790 Using where
explain select * from t1 where c between 1 and 10000;
table type possible_keys key key_len ref rows Extra
t1 range c c 5 NULL 1 Using where
update t1 set c=a;
explain select * from t1 where c between 1 and 10000;
table type possible_keys key key_len ref rows Extra
t1 ALL c NULL NULL NULL 29537 Using where
drop table t1,t2;
create table t1 (id int primary key auto_increment, fk int, index index_fk (fk)) type=innodb;
insert into t1 (id) values (null),(null),(null),(null),(null);
update t1 set fk=69 where fk is null order by id limit 1;
SELECT * from t1;
id fk
2 NULL
3 NULL
4 NULL
5 NULL
1 69
drop table t1;
create table t1 (a int not null, b int not null, key (a));
insert into t1 values (1,1),(1,2),(1,3),(3,1),(3,2),(3,3),(3,1),(3,2),(3,3),(2,1),(2,2),(2,3);
SET @tmp=0;
update t1 set b=(@tmp:=@tmp+1) order by a;
update t1 set b=99 where a=1 order by b asc limit 1;
update t1 set b=100 where a=1 order by b desc limit 2;
update t1 set a=a+10+b where a=1 order by b;
select * from t1 order by a,b;
a b
2 4
2 5
2 6
3 7
3 8
3 9
3 10
3 11
3 12
13 2
111 100
111 100
drop table t1;

View File

@ -373,10 +373,10 @@ Thimble Smith Happy 3 3 3
drop table t1,t2;
create table t1 (id int not null, str char(10), index(str));
insert into t1 values (1, null), (2, null), (3, "foo"), (4, "bar");
select * from t1 where str is not null;
select * from t1 where str is not null order by id;
id str
4 bar
3 foo
4 bar
select * from t1 where str is null;
id str
1 NULL

View File

@ -286,7 +286,7 @@ a b
107 7
108 8
109 9
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b where t1.b between 3 and 5;
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b+10 where t1.b between 3 and 5 and t2.a=t1.a-100;
select * from t1;
a b
201 1
@ -300,13 +300,13 @@ a b
109 9
select * from t2;
a b
1 3
2 3
3 3
4 3
5 3
6 3
7 3
8 3
9 3
1 1
2 2
3 13
4 14
5 15
6 6
7 7
8 8
9 9
drop table t1,t2;

View File

@ -84,7 +84,7 @@ table type possible_keys key key_len ref rows Extra
t1 range a,b a 5 NULL 5 Using where
explain select * from t1 where (a is null or a = 7) and b=7 and c=0;
table type possible_keys key key_len ref rows Extra
t1 range a,b a 5 NULL 4 Using where
t1 ALL a,b NULL NULL NULL 12 Using where
explain select * from t1 where (a is null and b>a) or a is null and b=7 limit 2;
table type possible_keys key key_len ref rows Extra
t1 ref a,b a 5 const 3 Using where

View File

@ -1,6 +1,6 @@
drop table if exists t1;
SET SQL_SAFE_UPDATES=1,SQL_SELECT_LIMIT=4, SQL_MAX_JOIN_SIZE=9;
create table t1 (a int primary key, b char(20));
create table t1 (a int auto_increment primary key, b char(20));
insert into t1 values(1,"test");
SELECT SQL_BUFFER_RESULT * from t1;
a b
@ -30,10 +30,34 @@ You are using safe update mode and you tried to update a table without a WHERE t
delete from t1 where a+0=1;
You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column
select 1 from t1,t1 as t2,t1 as t3,t1 as t4,t1 as t5;
The SELECT would examine too many records and probably take a very long time. Check your WHERE and use SET OPTION SQL_BIG_SELECTS=1 if the SELECT is ok
The SELECT would examine more rows than MAX_JOIN_SIZE. Check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is ok
update t1 set b="a" limit 1;
update t1 set b="a" where b="b" limit 2;
delete from t1 where b="test" limit 1;
delete from t1 where a+0=1 limit 2;
SET MAX_JOIN_SIZE=2;
SELECT @@MAX_JOIN_SIZE, @@SQL_BIG_SELECTS;
@@max_join_size @@sql_big_selects
2 0
insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a");
SELECT * from t1;
The SELECT would examine more rows than MAX_JOIN_SIZE. Check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is ok
SET SQL_BIG_SELECTS=1;
SELECT * from t1;
a b
3 a
2 test2
4 a
5 a
SET MAX_JOIN_SIZE=2;
SELECT * from t1;
The SELECT would examine more rows than MAX_JOIN_SIZE. Check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is ok
SET MAX_JOIN_SIZE=DEFAULT;
SELECT * from t1;
a b
3 a
2 test2
4 a
5 a
drop table t1;
SET SQL_SAFE_UPDATES=0,SQL_SELECT_LIMIT=DEFAULT, SQL_MAX_JOIN_SIZE=DEFAULT;

View File

@ -102,13 +102,39 @@ select status from t1;
status
1
drop table t1;
create table t1 (a int not null, b int not null);
insert into t1 values (1,1),(1,2),(1,3);
update t1 set b=4 where a=1 order by b asc limit 1;
update t1 set b=4 where a=1 order by b desc limit 1;
select * from t1;
create table t1 (a int not null, b int not null, key (a));
insert into t1 values (1,1),(1,2),(1,3),(3,1),(3,2),(3,3),(3,1),(3,2),(3,3),(2,1),(2,2),(2,3);
SET @tmp=0;
update t1 set b=(@tmp:=@tmp+1) order by a;
update t1 set b=99 where a=1 order by b asc limit 1;
select * from t1 order by a,b;
a b
1 4
1 2
1 4
1 3
1 99
2 4
2 5
2 6
3 7
3 8
3 9
3 10
3 11
3 12
update t1 set b=100 where a=1 order by b desc limit 2;
update t1 set a=a+10+b where a=1 order by b;
select * from t1 order by a,b;
a b
2 4
2 5
2 6
3 7
3 8
3 9
3 10
3 11
3 12
13 2
111 100
111 100
drop table t1;

View File

@ -34,12 +34,15 @@ CREATE TABLE t2 (
INSERT INTO t2 VALUES (1,'name','pass','mail','Y','v','n','adr','1','1','1');
INSERT INTO t2 VALUES (2,'name','pass','mail','Y','v','n','adr','1','1','1');
INSERT INTO t2 VALUES (3,'name','pass','mail','Y','v','n','adr','1','1','1');
INSERT INTO t2 VALUES (4,'name','pass','mail','Y','v','n','adr','1','1','1');
INSERT INTO t2 VALUES (5,'name','pass','mail','Y','v','n','adr','1','1','1');
SELECT t2.userid, MIN(t1.score) FROM t1, t2 WHERE t1.userID=t2.userID GROUP BY t2.userid;
SELECT t2.userid, MIN(t1.score) FROM t1, t2 WHERE t1.userID=t2.userID GROUP BY t2.userid ORDER BY NULL;
SELECT t2.userid, MIN(t1.score) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid;
SELECT t2.userid, MIN(t1.score+0.0) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid;
SELECT t2.userid, MIN(t1.score+0.0) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid ORDER BY NULL;
EXPLAIN SELECT t2.userid, MIN(t1.score+0.0) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid ORDER BY NULL;
drop table test.t1,test.t2;
#
@ -236,7 +239,7 @@ CREATE TABLE t1 (
key (score)
);
INSERT INTO t1 VALUES (1,1,1),(2,2,2),(2,1,1),(3,3,3),(4,3,3),(5,3,3);
INSERT INTO t1 VALUES (1,1,1),(2,2,2),(2,1,1),(3,3,3),(4,3,3),(5,3,3),(6,3,3),(7,3,3);
explain select userid,count(*) from t1 group by userid desc;
explain select userid,count(*) from t1 group by userid desc order by null;
select userid,count(*) from t1 group by userid desc;
@ -255,8 +258,6 @@ explain select sql_big_result score,count(*) from t1 group by score desc order b
select sql_big_result score,count(*) from t1 group by score desc;
drop table t1;
#
# not purely group_by bug, but group_by is involved...
create table t1 (a date default null, b date default null);
@ -265,7 +266,6 @@ select a,min(b) c,count(distinct rand()) from t1 group by a having c<a + interva
drop table t1;
# Compare with hash keys
#
CREATE TABLE t1 (a char(1));
INSERT INTO t1 VALUES ('A'),('B'),('A'),('B'),('A'),('B'),(NULL),('a'),('b'),(NULL),('A'),('B'),(NULL);
@ -401,15 +401,7 @@ drop table t1,t2;
#
create table t1 (a int, b int);
insert into t1 values (1, 4);
insert into t1 values (10, 40);
insert into t1 values (1, 4);
insert into t1 values (10, 43);
insert into t1 values (1, 4);
insert into t1 values (10, 41);
insert into t1 values (1, 4);
insert into t1 values (10, 43);
insert into t1 values (1, 4);
insert into t1 values (1, 4),(10, 40),(1, 4),(10, 43),(1, 4),(10, 41),(1, 4),(10, 43),(1, 4);
select a, MAX(b), INTERVAL (MAX(b), 1,3,10,30,39,40,50,60,100,1000) from t1 group by a;
select a, MAX(b), CASE MAX(b) when 4 then 4 when 43 then 43 else 0 end from t1 group by a;
select a, MAX(b), FIELD(MAX(b), '43', '4', '5') from t1 group by a;

View File

@ -795,5 +795,58 @@ select * from t1;
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b where t1.b between 3 and 5;
select * from t1;
select * from t2;
drop table t1,t2;
#
# Test that MySQL prioritizes clustered indexes
#
create table t1 (a int not null auto_increment primary key, b int, c int, key(c)) type=innodb;
create table t2 (a int not null auto_increment primary key, b int);
insert into t1 (b) values (null),(null),(null),(null),(null),(null),(null);
insert into t2 (a) select b from t1;
insert into t1 (b) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
select count(*) from t1;
explain select a from t1 where a between 1 and 10000;
explain select * from t1 where a between 1 and 10000;
explain select * from t1 where c between 1 and 10000;
update t1 set c=a;
explain select * from t1 where c between 1 and 10000;
drop table t1,t2;
#
# Test of UPDATE ... ORDER BY
#
create table t1 (id int primary key auto_increment, fk int, index index_fk (fk)) type=innodb;
insert into t1 (id) values (null),(null),(null),(null),(null);
update t1 set fk=69 where fk is null order by id limit 1;
SELECT * from t1;
drop table t1;
create table t1 (a int not null, b int not null, key (a));
insert into t1 values (1,1),(1,2),(1,3),(3,1),(3,2),(3,3),(3,1),(3,2),(3,3),(2,1),(2,2),(2,3);
SET @tmp=0;
update t1 set b=(@tmp:=@tmp+1) order by a;
update t1 set b=99 where a=1 order by b asc limit 1;
update t1 set b=100 where a=1 order by b desc limit 2;
update t1 set a=a+10+b where a=1 order by b;
select * from t1 order by a,b;
drop table t1;

View File

@ -248,7 +248,7 @@ drop table t1,t2;
create table t1 (id int not null, str char(10), index(str));
insert into t1 values (1, null), (2, null), (3, "foo"), (4, "bar");
select * from t1 where str is not null;
select * from t1 where str is not null order by id;
select * from t1 where str is null;
drop table t1;

View File

@ -247,7 +247,7 @@ update t1,t2 set t1.b=t1.b+10 where t1.b=2;
select * from t1;
# Range key (in t1)
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b where t1.b between 3 and 5;
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b+10 where t1.b between 3 and 5 and t2.a=t1.a-100;
select * from t1;
select * from t2;

View File

@ -4,7 +4,7 @@
drop table if exists t1;
SET SQL_SAFE_UPDATES=1,SQL_SELECT_LIMIT=4, SQL_MAX_JOIN_SIZE=9;
create table t1 (a int primary key, b char(20));
create table t1 (a int auto_increment primary key, b char(20));
insert into t1 values(1,"test");
SELECT SQL_BUFFER_RESULT * from t1;
update t1 set b="a" where a=1;
@ -15,18 +15,40 @@ update t1 set b="a" where a=1;
select 1 from t1,t1 as t2,t1 as t3,t1 as t4;
# The following should give errors:
!$1175 update t1 set b="a";
!$1175 update t1 set b="a" where b="test";
!$1175 delete from t1;
!$1175 delete from t1 where b="test";
!$1175 delete from t1 where a+0=1;
!$1104 select 1 from t1,t1 as t2,t1 as t3,t1 as t4,t1 as t5;
--error 1175
update t1 set b="a";
--error 1175
update t1 set b="a" where b="test";
--error 1175
delete from t1;
--error 1175
delete from t1 where b="test";
--error 1175
delete from t1 where a+0=1;
--error 1104
select 1 from t1,t1 as t2,t1 as t3,t1 as t4,t1 as t5;
# The following should be ok:
update t1 set b="a" limit 1;
update t1 set b="a" where b="b" limit 2;
delete from t1 where b="test" limit 1;
delete from t1 where a+0=1 limit 2;
# Test SQL_BIG_SELECTS
SET MAX_JOIN_SIZE=2;
SELECT @@MAX_JOIN_SIZE, @@SQL_BIG_SELECTS;
insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a");
--error 1104
SELECT * from t1;
SET SQL_BIG_SELECTS=1;
SELECT * from t1;
SET MAX_JOIN_SIZE=2;
--error 1104
SELECT * from t1;
SET MAX_JOIN_SIZE=DEFAULT;
SELECT * from t1;
drop table t1;
SET SQL_SAFE_UPDATES=0,SQL_SELECT_LIMIT=DEFAULT, SQL_MAX_JOIN_SIZE=DEFAULT;

View File

@ -81,9 +81,13 @@ drop table t1;
# Test of ORDER BY
#
create table t1 (a int not null, b int not null);
insert into t1 values (1,1),(1,2),(1,3);
update t1 set b=4 where a=1 order by b asc limit 1;
update t1 set b=4 where a=1 order by b desc limit 1;
select * from t1;
create table t1 (a int not null, b int not null, key (a));
insert into t1 values (1,1),(1,2),(1,3),(3,1),(3,2),(3,3),(3,1),(3,2),(3,3),(2,1),(2,2),(2,3);
SET @tmp=0;
update t1 set b=(@tmp:=@tmp+1) order by a;
update t1 set b=99 where a=1 order by b asc limit 1;
select * from t1 order by a,b;
update t1 set b=100 where a=1 order by b desc limit 2;
update t1 set a=a+10+b where a=1 order by b;
select * from t1 order by a,b;
drop table t1;

View File

@ -1,5 +1,11 @@
#! /bin/sh
# debug
#set -x
# stop on errors
set -e
for package in . ./innobase
do
(cd $package

View File

@ -1,8 +1,11 @@
#! /bin/sh
#debug
# debug
#set -x
# stop on errors
set -e
if test ! -r ./sql/mysqld.cc
then
echo "you must start from the top source directory"

View File

@ -1,5 +1,11 @@
#! /bin/sh
# debug
#set -x
# stop on errors
set -e
path=`dirname $0`
# clean

View File

@ -1,8 +1,11 @@
#! /bin/sh
#debug
# debug
#set -x
# stop on errors
set -e
if test ! -r ./sql/mysqld.cc
then
echo "you must start from the top source directory"

View File

@ -1,5 +1,11 @@
#! /bin/sh
# debug
#set -x
# stop on errors
set -e
path=`dirname $0`
$path/compile-netware-standard

View File

@ -1,5 +1,11 @@
#! /bin/sh
# debug
#set -x
# stop on errors
set -e
path=`dirname $0`
. $path/compile-netware-START

View File

@ -1,5 +1,12 @@
#! /bin/sh
# debug
#set -x
# stop on errors
set -e
path=`dirname $0`
. $path/compile-netware-START

netware/BUILD/cron-build Executable file
View File

@ -0,0 +1,46 @@
#! /bin/sh
# debug
#set -x
# stop on errors
set -e
# repository directory
repo_dir=`pwd`
# show usage
show_usage()
{
cat << EOF
usage: cron-build
EOF
exit 0;
}
echo "starting build..."
# check for bk and repo_dir
bk help > /dev/null
repo_dir=`bk root $repo_dir`
cd $repo_dir
# pull latest code
echo 'y' | bk pull
# determine version
version=`grep -e "AM_INIT_AUTOMAKE(mysql, .*)" < configure.in | sed -e "s/AM_INIT_AUTOMAKE(mysql, \(.*\))/\1/"`
echo "version: $version"
# latest revision
rev=`bk changes -e -n -d':REV:' | head -1`
echo "latest revision: $rev"
# run bootstrap
./netware/BUILD/nwbootstrap --revision=$rev --suffix=$rev --build=all
echo "done"

netware/BUILD/crontab Executable file
View File

@ -0,0 +1,4 @@
00 23 * * * (export PATH='/usr/local/bin:/usr/bin:/bin'; export DISPLAY=':0'; cd ~/bk/mysqldoc; echo 'y' | bk pull)
00 00 * * * (export PATH='/usr/local/bin:/usr/bin:/bin'; export DISPLAY=':0'; cd ~/bk/mysql-4.0; ./netware/BUILD/cron-build)
00 04 * * * (export PATH='/usr/local/bin:/usr/bin:/bin'; export DISPLAY=':0'; cd ~/bk/mysql-4.1; ./netware/BUILD/cron-build)

View File

@ -1,5 +1,8 @@
#! /bin/sh
# stop on errors
set -e
args=" $*"
wine --debugmsg -all -- mwasmnlm $args

View File

@ -1,5 +1,8 @@
#! /bin/sh
# stop on errors
set -e
# mwccnlm is having a hard time understanding "-I./../include"
# convert it to "-I../include"
args=" "`echo $* | sed -e 's/-I.\/../-I../g'`

View File

@ -1,5 +1,8 @@
#! /bin/sh
# stop on errors
set -e
args=" $*"
wine --debugmsg -all -- mwldnlm $args

View File

@ -3,11 +3,11 @@
# debug
#set -x
path=`dirname $0`
# stop on errors
set -e
path=`dirname $0`
# repository directory
repo_dir=`pwd`
@ -24,6 +24,7 @@ temp_dir=""
revision=""
rev=""
build=""
suffix=""
mwenv=""
# show usage
@ -81,6 +82,7 @@ for arg do
--wine-build-dir=*) wine_build_dir=`echo "$arg" | sed -e "s;--wine-build-dir=;;"` ;;
--revision=*) revision=`echo "$arg" | sed -e "s;--revision=;;"` ;;
--build=*) build=`echo "$arg" | sed -e "s;--build=;;"` ;;
--suffix=*) suffix=`echo "$arg" | sed -e "s;--suffix=;;"` ;;
--doc-dir=*) doc_dir=`echo "$arg" | sed -e "s;--doc-dir=;;"` ;;
*) show_usage ;;
esac
@ -111,6 +113,12 @@ echo "version: $version"
# build target directory
target_dir="$build_dir/mysql-$version"
# add suffix
if test $suffix
then
target_dir="$target_dir-$suffix"
fi
# delete any old target
if test -d $target_dir.old; then rm -rf $target_dir.old; fi

View File

@ -1,12 +0,0 @@
This directory contains a set of test cases for replication. To get it
to work on your system, install this version of MySQL on the master and on
the slave, configure them according to the Replication HOWTO in the manual,
modify include/master-slave.inc to specify correct connection parameters
for the master and the slave and do
sh run-all-tests
If you would like to add your own test case, create a directory
test-your-test-case-name, write your own run.test following the examples
in the other test cases. Note that you can create the files containing
the expected output ( master files) by running mysqltest --record < run.test

View File

@ -1,2 +0,0 @@
connect (slave,localhost,root,,test,0,/var/lib/mysql/mysql.sock);
connect (master,sarochka,admin,,test,0,0);

View File

@ -1,9 +0,0 @@
#! /bin/sh
for d in test-*; do
cd $d
echo -n $d | sed -e s/test-//
echo -n "...."
../../client/mysqltest $@ < run.test
cd ..
done

View File

@ -1,10 +0,0 @@
source ../include/master-slave.inc;
connection master;
drop table if exists x;
create table x(n int auto_increment primary key);
set insert_id = 2000;
insert into x values (NULL),(NULL),(NULL);
connection slave;
sleep 3;
@x.master select * from x;

View File

@ -1,4 +0,0 @@
n
2000
2001
2002

View File

@ -1,10 +0,0 @@
source ../include/master-slave.inc;
connection master;
drop table if exists x;
create table x(n int primary key);
!insert into x values (1),(2),(2);
insert into x values (3);
connection slave;
sleep 3;
@x.master select * from x;

View File

@ -1,4 +0,0 @@
n
1
2
3

View File

@ -1,25 +0,0 @@
source ../include/master-slave.inc;
connection slave;
!slave stop;
flush slave;
connection master;
flush master;
connection slave;
slave start;
connection master;
use test;
drop table if exists words;
create table words (word char(20) not null, index(word));
load data infile '/usr/dict/words' into table words;
drop table if exists words1;
create table words1 (word char(20) not null);
load data infile '/usr/dict/words' into table words1;
sleep 20;
connection slave;
use test;
drop table if exists words;
load table words from master;
drop table if exists words1;
load table words1 from master;
@table-dump-check.master check table words;
@table-dump-select.master select count(*) from words1;

View File

@ -1,2 +0,0 @@
Table Op Msg_type Msg_text
test.words check status OK

View File

@ -1,2 +0,0 @@
count(*)
45402

View File

@ -1,12 +0,0 @@
source ../include/master-slave.inc;
connection master;
drop table if exists test;
CREATE TABLE test (name varchar(64), age smallint(3));
INSERT INTO test SET name='Andy', age=31;
INSERT test SET name='Jacob', age=2;
INSERT into test SET name='Caleb', age=1;
ALTER TABLE test ADD id int(8) ZEROFILL AUTO_INCREMENT PRIMARY KEY;
@test.master select * from test;
connection slave;
sleep 3;
@test.master select * from test;

View File

@ -1,4 +0,0 @@
name age id
Andy 31 00000001
Jacob 2 00000002
Caleb 1 00000003

View File

@ -1,2 +0,0 @@
unix_timestamp(t)
200006

View File

@ -1,2 +0,0 @@
unix_timestamp(t)
973999074

View File

@ -1,17 +0,0 @@
#! ../client/mysqltest
# tests if replication preserves the timestamp properly
source ../include/master-slave.inc;
connection master;
set timestamp=200006;
drop table if exists foo;
create table foo(t timestamp not null,a char(1));
insert into foo ( a) values ('F');
@repl-timestamp.master select unix_timestamp(t) from foo;
sleep 3;
connection slave;
drop table if exists foo;
load table foo from master;
@repl-timestamp.master select unix_timestamp(t) from foo;

View File

@ -1,3 +0,0 @@
n
1
2

View File

@ -1,3 +0,0 @@
n
1
2

View File

@ -1,24 +0,0 @@
source ../include/master-slave.inc;
connection slave;
!slave stop;
connection master;
flush master;
connection slave;
flush slave;
!slave start;
sleep 3;
connection master;
use test;
drop table if exists words;
create table words (word char(20) not null, index(word));
load data infile '/usr/dict/words' into table words;
drop table if exists foo;
create table foo(n int);
insert into foo values(1),(2);
@foo-dump-master.master select * from foo;
@sum-wlen-master.master select sum(length(word)) from words;
connection slave;
sleep 15;
use test;
@sum-wlen-slave.master select sum(length(word)) from words;
@foo-dump-slave.master select * from foo;

View File

@ -1,2 +0,0 @@
sum(length(word))
363634

View File

@ -1,2 +0,0 @@
sum(length(word))
363634

View File

@ -3322,11 +3322,11 @@ bool Field_newdate::get_date(TIME *ltime,bool fuzzydate)
if (is_null())
return 1;
uint32 tmp=(uint32) uint3korr(ptr);
bzero((char*) ltime,sizeof(*ltime));
ltime->day= tmp & 31;
ltime->month= (tmp >> 5) & 15;
ltime->year= (tmp >> 9);
ltime->time_type=TIMESTAMP_DATE;
ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0;
return (!fuzzydate && (!ltime->month || !ltime->day)) ? 1 : 0;
}

View File

@ -54,8 +54,9 @@ class ha_heap: public handler
uint max_keys() const { return MAX_KEY; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return HA_MAX_REC_LENGTH; }
virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; }
double scan_time() { return (double) (records+deleted) / 20.0+10; }
double read_time(uint index, uint ranges, ha_rows rows)
{ return (double) rows / 20.0+1; }
virtual bool fast_key_read() { return 1;}
int open(const char *name, int mode, uint test_if_locked);

View File

@ -873,6 +873,7 @@ innobase_flush_logs(void)
DBUG_ENTER("innobase_flush_logs");
log_flush_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP);
log_flush_to_disk();
DBUG_RETURN(result);
}
@ -3542,10 +3543,8 @@ ha_innobase::records_in_range(
/*************************************************************************
Gives an UPPER BOUND to the number of rows in a table. This is used in
filesort.cc and the upper bound must hold. TODO: Since the number of
rows in a table may change after this function is called, we still may
get a 'Sort aborted' error in filesort.cc of MySQL. The ultimate fix is to
improve the algorithm of filesort.cc. */
filesort.cc and it is better if the upper bound holds.
*/
ha_rows
ha_innobase::estimate_number_of_rows(void)
@ -3611,6 +3610,29 @@ ha_innobase::scan_time()
return((double) (prebuilt->table->stat_clustered_index_size));
}
/*
Calculate the time it takes to read a set of ranges through an index.
This enables us to optimise reads for clustered indexes.
*/
double ha_innobase::read_time(uint index, uint ranges, ha_rows rows)
{
ha_rows total_rows;
double time_for_scan;
if (index != table->primary_key)
return handler::read_time(index, ranges, rows); // Not clustered
if (rows <= 2)
return (double) rows;
/*
Assume that the read is proportional to scan time for all rows + one
seek per range.
*/
time_for_scan= scan_time();
if ((total_rows= estimate_number_of_rows()) < rows)
return time_for_scan;
return (ranges + (double) rows / (double) total_rows * time_for_scan);
}
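To make the trade-off behind the formula above concrete, here is a small free-standing sketch that plugs assumed numbers (none of them taken from the patch) into the two estimates: the clustered-index cost added here versus the generic handler default of ranges + rows:

#include <stdio.h>

int main(void)
{
  double scan_time=  400.0;    /* assumed scan_time() result              */
  double total_rows= 10000.0;  /* assumed estimate_number_of_rows() value */
  double ranges=     1.0;
  double rows=       500.0;

  /* clustered index: one seek per range plus a share of a full scan */
  printf("clustered estimate: %g\n",
         ranges + rows / total_rows * scan_time);    /* 1 + 500/10000*400 = 21 */

  /* generic handler default: ranges + rows */
  printf("default estimate:   %g\n", ranges + rows); /* 501 */
  return 0;
}

Under these assumed numbers the clustered range read is costed far cheaper than the default, which is the behaviour the new range and innodb test results above reflect.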
/*************************************************************************
Returns statistics information of the table to the MySQL interpreter,
in various fields of the handle object. */
@ -3735,6 +3757,23 @@ ha_innobase::info(
DBUG_VOID_RETURN;
}
/**************************************************************************
Updates index cardinalities of the table, based on 10 random dives into
each index tree. This does NOT calculate exact statistics of the table. */
int
ha_innobase::analyze(
/*=================*/
/* out: returns always 0 (success) */
THD* thd, /* in: connection thread handle */
HA_CHECK_OPT* check_opt) /* in: currently ignored */
{
/* Simply call ::info() with all the flags */
info(HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE);
return(0);
}
/***********************************************************************
Tries to check that an InnoDB table is not corrupted. If corruption is
noticed, prints to stderr information about it. In case of corruption

View File

@ -126,6 +126,7 @@ class ha_innobase: public handler
void initialize(void);
int close(void);
double scan_time();
double read_time(uint index, uint ranges, ha_rows rows);
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
@ -151,6 +152,7 @@ class ha_innobase: public handler
void position(const byte *record);
void info(uint);
int analyze(THD* thd,HA_CHECK_OPT* check_opt);
int extra(enum ha_extra_function operation);
int reset(void);
int external_lock(THD *thd, int lock_type);

View File

@ -227,7 +227,8 @@ public:
void change_table_ptr(TABLE *table_arg) { table=table_arg; }
virtual double scan_time()
{ return ulonglong2double(data_file_length) / IO_SIZE + 1; }
virtual double read_time(ha_rows rows) { return rows2double(rows); }
virtual double read_time(uint index, uint ranges, ha_rows rows)
{ return rows2double(ranges+rows); }
virtual bool fast_key_read() { return 0;}
virtual key_map keys_to_use_for_scanning() { return 0; }
virtual bool has_transactions(){ return 0;}

View File

@ -1066,7 +1066,7 @@ bool Item_sum_count_distinct::setup(THD *thd)
int Item_sum_count_distinct::tree_to_myisam()
{
if (create_myisam_from_heap(table, tmp_table_param,
if (create_myisam_from_heap(current_thd, table, tmp_table_param,
HA_ERR_RECORD_FILE_FULL, 1) ||
tree_walk(&tree, (tree_walk_action)&dump_leaf, (void*)this,
left_root_right))
@ -1120,7 +1120,8 @@ bool Item_sum_count_distinct::add()
if (error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE)
{
if (create_myisam_from_heap(table, tmp_table_param, error,1))
if (create_myisam_from_heap(current_thd, table, tmp_table_param, error,
1))
return 1; // Not a table_is_full error
}
}

View File

@ -42,7 +42,7 @@
#define MAIN_THD
#define SIGNAL_THD
#ifdef PURIFY
#ifdef HAVE_purify
#define IF_PURIFY(A,B) (A)
#else
#define IF_PURIFY(A,B) (B)

View File

@ -284,7 +284,7 @@ typedef struct st_qsel_param {
KEY_PART *key_parts,*key_parts_end,*key[MAX_KEY];
MEM_ROOT *mem_root;
table_map prev_tables,read_tables,current_table;
uint baseflag,keys,max_key_part;
uint baseflag, keys, max_key_part, range_count;
uint real_keynr[MAX_KEY];
char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH],
max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
@ -710,8 +710,10 @@ int SQL_SELECT::test_quick_select(key_map keys_to_use, table_map prev_tables,
(double) keys_per_block);
}
else
found_read_time= head->file->read_time(found_records)+
(double) found_records / TIME_FOR_COMPARE;
found_read_time= (head->file->read_time(keynr,
param.range_count,
found_records)+
(double) found_records / TIME_FOR_COMPARE);
if (read_time > found_read_time)
{
read_time=found_read_time;
@ -2113,11 +2115,12 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree)
if (!tree)
DBUG_RETURN(HA_POS_ERROR); // Can't use it
param->max_key_part=0;
param->range_count=0;
if (tree->type == SEL_ARG::IMPOSSIBLE)
DBUG_RETURN(0L); // Impossible select. return
if (tree->type != SEL_ARG::KEY_RANGE || tree->part != 0)
DBUG_RETURN(HA_POS_ERROR); // Don't use tree
param->max_key_part=0;
records=check_quick_keys(param,idx,tree,param->min_key,0,param->max_key,0);
if (records != HA_POS_ERROR)
{
@ -2185,6 +2188,7 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree,
}
keynr=param->real_keynr[idx];
param->range_count++;
if (!tmp_min_flag && ! tmp_max_flag &&
(uint) key_tree->part+1 == param->table->key_info[keynr].key_parts &&
(param->table->key_info[keynr].flags & HA_NOSAME) &&

View File

@ -15,7 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* Functions to read, write and lock records */
/* Functions for easy reading of records, possible through a cache */
#include "mysql_priv.h"

View File

@ -239,7 +239,7 @@ static sys_var_thd_bit sys_sql_big_tables("sql_big_tables",
#endif
static sys_var_thd_bit sys_big_selects("sql_big_selects",
set_option_bit,
OPTION_BIG_TABLES);
OPTION_BIG_SELECTS);
static sys_var_thd_bit sys_log_off("sql_log_off",
set_option_bit,
OPTION_LOG_OFF);

View File

@ -114,7 +114,7 @@
"Blob polo-B<>ka '%-.64s' nem<65><6D>e m<>t defaultn<74> hodnotu",
"Nep-B<><42>pustn<74> jm<6A>no datab<61>ze '%-.64s'",
"Nep-B<><42>pustn<74> jm<6A>no tabulky '%-.64s'",
"Zadan-B<> SELECT by proch<63>zel p<><70>li<6C> mnoho z<>znam<61> a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v po<70><6F>dku, pou<6F>ijte SET OPTION SQL_BIG_SELECTS=1",
"Zadan-B<> SELECT by proch<63>zel p<><70>li<6C> mnoho z<>znam<61> a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v po<70><6F>dku, pou<6F>ijte SET SQL_BIG_SELECTS=1",
"Nezn-B<>m<EFBFBD> chyba",
"Nezn-B<>m<EFBFBD> procedura %s",
"Chybn-B<> po<70>et parametr<74> procedury %s",

View File

@ -108,7 +108,7 @@
"BLOB feltet '%-.64s' kan ikke have en standard v<>rdi",
"Ugyldigt database navn '%-.64s'",
"Ugyldigt tabel navn '%-.64s'",
"SELECT ville unders<72>ge for mange poster og ville sandsynligvis tage meget lang tid. Unders<72>g WHERE delen og brug SET OPTION SQL_BIG_SELECTS=1 hvis udtrykket er korrekt",
"SELECT ville unders<72>ge for mange poster og ville sandsynligvis tage meget lang tid. Unders<72>g WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt",
"Ukendt fejl",
"Ukendt procedure %s",
"Forkert antal parametre til proceduren %s",

View File

@ -116,7 +116,7 @@
"Blob veld '%-.64s' can geen standaardwaarde bevatten",
"Databasenaam '%-.64s' is niet getoegestaan",
"Niet toegestane tabelnaam '%-.64s'",
"Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET OPTION SQL_BIG_SELECTS=1 als het stament in orde is.",
"Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is.",
"Onbekende Fout",
"Onbekende procedure %s",
"Foutief aantal parameters doorgegeven aan procedure %s",

View File

@ -105,7 +105,7 @@
"BLOB column '%-.64s' can't have a default value",
"Incorrect database name '%-.100s'",
"Incorrect table name '%-.100s'",
"The SELECT would examine too many records and probably take a very long time. Check your WHERE and use SET OPTION SQL_BIG_SELECTS=1 if the SELECT is ok",
"The SELECT would examine more rows than MAX_JOIN_SIZE. Check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is ok",
"Unknown error",
"Unknown procedure '%-.64s'",
"Incorrect parameter count to procedure '%-.64s'",

View File

@ -110,7 +110,7 @@
"BLOB-t<><74>pi tulp '%-.64s' ei saa omada vaikev<65><76>rtust",
"Vigane andmebaasi nimi '%-.100s'",
"Vigane tabeli nimi '%-.100s'",
"SELECT lause peab l<>bi vaatama suure hulga kirjeid ja v<>taks t<>en<65>oliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada k<>sku SET OPTION SQL_BIG_SELECTS=1",
"SELECT lause peab l<>bi vaatama suure hulga kirjeid ja v<>taks t<>en<65>oliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada k<>sku SET SQL_BIG_SELECTS=1",
"Tundmatu viga",
"Tundmatu protseduur '%-.64s'",
"Vale parameetrite hulk protseduurile '%-.64s'",

View File

@ -105,7 +105,7 @@
"BLOB '%-.64s' ne peut avoir de valeur par d<>faut",
"Nom de base de donn<6E>e ill<6C>gal: '%-.64s'",
"Nom de table ill<6C>gal: '%-.64s'",
"SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. V<>rifiez la clause WHERE et utilisez SET OPTION SQL_BIG_SELECTS=1 si SELECT se passe bien",
"SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. V<>rifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien",
"Erreur inconnue",
"Proc<6F>dure %s inconnue",
"Mauvais nombre de param<61>tres pour la procedure %s",

View File

@ -108,7 +108,7 @@
"BLOB-Feld '%-.64s' kann keinen Vorgabewert (Default-Value) besitzen.",
"Unerlaubter Datenbankname '%-.64s'.",
"Unerlaubter Tabellenname '%-.64s'.",
"Die Ausf<73>hrung des SELECT w<>rde zu viele Datens<6E>tze untersuchen und wahrscheinlich sehr lange daueren. Bitte WHERE <20>berpr<70>fen und SET OPTION SQL_BIG_SELECTS=1 verwenden, sofern SELECT ok ist.",
"Die Ausf<73>hrung des SELECT w<>rde zu viele Datens<6E>tze untersuchen und wahrscheinlich sehr lange daueren. Bitte WHERE <20>berpr<70>fen und SET SQL_BIG_SELECTS=1 verwenden, sofern SELECT ok ist.",
"Unbekannter Fehler.",
"Unbekannte Procedure %-.64s.",
"Falsche Parameterzahl f<>r Procedure %-.64s.",

View File

@ -105,7 +105,7 @@
"<22><> Blob <20><><EFBFBD><EFBFBD><EFBFBD> '%-.64s' <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> (default value)",
"<22><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.100s'",
"<22><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.100s'",
"<22><> SELECT <20><> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>. <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> WHERE <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> SET OPTION SQL_BIG_SELECTS=1 <20><> <20><> SELECT <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD>",
"<22><> SELECT <20><> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>. <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> WHERE <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> SET SQL_BIG_SELECTS=1 <20><> <20><> SELECT <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD>",
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD>",
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.64s'",
"<22><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.64s'",

View File

@ -107,7 +107,7 @@
"A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke",
"Hibas adatbazisnev: '%-.100s'",
"Hibas tablanev: '%-.100s'",
"A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET OPTION SQL_BIG_SELECTS=1 beallitast, ha a SELECT ok",
"A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT ok",
"Ismeretlen hiba",
"Ismeretlen eljaras: '%-.64s'",
"Rossz parameter a(z) '%-.64s'eljaras szamitasanal",

View File

@ -105,7 +105,7 @@
"Il campo BLOB '%-.64s' non puo` avere un valore di default",
"Nome database errato '%-.100s'",
"Nome tabella errato '%-.100s'",
"La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET OPTION SQL_BIG_SELECTS=1 se e` tutto a posto.",
"La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto.",
"Errore sconosciuto",
"Procedura '%-.64s' sconosciuta",
"Numero di parametri errato per la procedura '%-.64s'",

View File

@ -107,7 +107,7 @@
"BLOB column '%-.64s' can't have a default value",
"<22><><EFBFBD><EFBFBD><EAA4B7> database ̾ '%-.100s' <20><><EFBFBD>ְ<EFBFBD><D6B0>äƤ<C3A4><C6A4>ޤ<EFBFBD>",
"<22><><EFBFBD><EFBFBD><EAA4B7> table ̾ '%-.100s' <20>Ϥޤ<CFA4><DEA4><EFBFBD><EFBFBD>äƤ<C3A4><C6A4>ޤ<EFBFBD>",
"The SELECT would examine too many records and probably take a very long time. Check your WHERE and use SET OPTION SQL_BIG_SELECTS=1 if the SELECT is ok",
"The SELECT would examine more rows than MAX_JOIN_SIZE. Check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is ok",
"Unknown error",
"Unknown procedure '%-.64s'",
"Incorrect parameter count to procedure '%-.64s'",

View File

@ -105,7 +105,7 @@
"BLOB Į<><C4AE> '%-.64s' <20><> <20><><EFBFBD><EFBFBD>Ʈ <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD> <20><> <20><><EFBFBD><EFBFBD><EFBFBD>ϴ<EFBFBD>.",
"'%-.100s' <20><><EFBFBD><EFBFBD>Ÿ<EFBFBD><C5B8><EFBFBD>̽<EFBFBD><CCBD><EFBFBD> <20≯<EFBFBD><CCB8><EFBFBD> <20><><EFBFBD><EFBFBD>Ȯ<EFBFBD>մϴ<D5B4>.",
"'%-.100s' <20><><EFBFBD>̺<EFBFBD> <20≯<EFBFBD><CCB8><EFBFBD> <20><><EFBFBD><EFBFBD>Ȯ<EFBFBD>մϴ<D5B4>.",
"SELECT <20><><EFBFBD>ɿ<EFBFBD><C9BF><EFBFBD> <20>ʹ<EFBFBD> <20><><EFBFBD><EFBFBD> <20><><EFBFBD>ڵ带 ã<><C3A3> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD> <20>ð<EFBFBD><C3B0><EFBFBD> <20>ҿ<EFBFBD><D2BF>˴ϴ<CBB4>. <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> WHERE <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD>ϰų<CFB0>, <20><><EFBFBD><EFBFBD> SELECT<43><54> ok<6F>Ǹ<EFBFBD> SET OPTION SQL_BIG_SELECTS=1 <20>ɼ<EFBFBD><C9BC><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD>ϼ<EFBFBD><CFBC><EFBFBD>.",
"SELECT <20><><EFBFBD>ɿ<EFBFBD><C9BF><EFBFBD> <20>ʹ<EFBFBD> <20><><EFBFBD><EFBFBD> <20><><EFBFBD>ڵ带 ã<><C3A3> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD> <20>ð<EFBFBD><C3B0><EFBFBD> <20>ҿ<EFBFBD><D2BF>˴ϴ<CBB4>. <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> WHERE <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD>ϰų<CFB0>, <20><><EFBFBD><EFBFBD> SELECT<43><54> ok<6F>Ǹ<EFBFBD> SET SQL_BIG_SELECTS=1 <20>ɼ<EFBFBD><C9BC><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD>ϼ<EFBFBD><CFBC><EFBFBD>.",
"<22>˼<EFBFBD> <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD>Դϴ<D4B4>.",
"<22>˼<EFBFBD> <20><><EFBFBD><EFBFBD> <20><><EFBFBD>๮ : '%-.64s'",
"'%-.64s' <20><><EFBFBD><EFBFBD><E0B9AE> <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD>Ȯ<EFBFBD><C8AE> <20>Ķ<EFBFBD><C4B6><EFBFBD><EFBFBD><EFBFBD>",

View File

@ -107,7 +107,7 @@
"Blob feltet '%-.64s' kan ikkje ha ein standard verdi",
"Ugyldig database namn '%-.64s'",
"Ugyldig tabell namn '%-.64s'",
"SELECT ville unders<72>kje for mange postar og ville sannsynligvis ta veldig lang tid. Unders<72>k WHERE klausulen og bruk SET OPTION SQL_BIG_SELECTS=1 om SELECTen er korrekt",
"SELECT ville unders<72>kje for mange postar og ville sannsynligvis ta veldig lang tid. Unders<72>k WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt",
"Ukjend feil",
"Ukjend prosedyre %s",
"Feil parameter tal til prosedyra %s",

View File

@ -107,7 +107,7 @@
"Blob feltet '%-.64s' kan ikke ha en standard verdi",
"Ugyldig database navn '%-.64s'",
"Ugyldig tabell navn '%-.64s'",
"SELECT ville unders<72>ke for mange poster og ville sannsynligvis ta veldig lang tid. Unders<72>k WHERE klausulen og bruk SET OPTION SQL_BIG_SELECTS=1 om SELECTen er korrekt",
"SELECT ville unders<72>ke for mange poster og ville sannsynligvis ta veldig lang tid. Unders<72>k WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt",
"Ukjent feil",
"Ukjent prosedyre %s",
"Feil parameter antall til prosedyren %s",

View File

@ -105,7 +105,7 @@
"Coluna BLOB '%-.64s' n<>o pode ter um valor padr<64>o (default)",
"Nome de banco de dados '%-.100s' incorreto",
"Nome de tabela '%-.100s' incorreto",
"O SELECT examinaria registros demais e provavelmente levaria muito tempo. Cheque sua cl<63>usula WHERE e use SET OPTION SQL_BIG_SELECTS=1, se o SELECT estiver correto",
"O SELECT examinaria registros demais e provavelmente levaria muito tempo. Cheque sua cl<63>usula WHERE e use SET SQL_BIG_SELECTS=1, se o SELECT estiver correto",
"Erro desconhecido",
"'Procedure' '%-.64s' desconhecida",
"N<>mero de par<61>metros incorreto para a 'procedure' '%-.64s'",

View File

@ -109,7 +109,7 @@
"Coloana BLOB '%-.64s' nu poate avea o valoare default",
"Numele bazei de date este incorect '%-.100s'",
"Numele tabelei este incorect '%-.100s'",
"SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp. Verifica clauza WHERE si foloseste SET OPTION SQL_BIG_SELECTS=1 daca SELECT-ul e ok",
"SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp. Verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e ok",
"Eroare unknown",
"Procedura unknown '%-.64s'",
"Procedura '%-.64s' are un numar incorect de parametri",

View File

@ -107,7 +107,7 @@
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> BLOB '%-.64s'",
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.100s'",
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.100s'",
"<22><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> SELECT <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20>, <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD>, <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>. <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> WHERE, <20>, <20><><EFBFBD><EFBFBD> <20> <20><><EFBFBD> <20><><EFBFBD> <20> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>, <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> SET OPTION SQL_BIG_SELECTS=1",
"<22><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> SELECT <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20>, <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD>, <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>. <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> WHERE, <20>, <20><><EFBFBD><EFBFBD> <20> <20><><EFBFBD> <20><><EFBFBD> <20> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>, <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> SET SQL_BIG_SELECTS=1",
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD>",
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.64s'",
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.64s'",

View File

@ -113,7 +113,7 @@
"Pole BLOB '%-.64s' nem<65><6D>e ma<6D> implicitn<74> hodnotu",
"Nepr<70>pustn<74> meno datab<61>zy '%-.100s'",
"Nepr<70>pustn<74> meno tabu<62>ky '%-.100s'",
"Zadan<61> po<70>iadavka SELECT by prech<63>dzala pr<70>li<6C> mnoho z<>znamov a trvala by pr<70>li<6C> dlho. Skontrolujte tvar WHERE a ak je v poriadku, pou<6F>ite SET OPTION SQL_BIG_SELECTS=1",
"Zadan<61> po<70>iadavka SELECT by prech<63>dzala pr<70>li<6C> mnoho z<>znamov a trvala by pr<70>li<6C> dlho. Skontrolujte tvar WHERE a ak je v poriadku, pou<6F>ite SET SQL_BIG_SELECTS=1",
"Nezn<7A>m<EFBFBD> chyba",
"Nezn<7A>m<EFBFBD> proced<65>ra '%-.64s'",
"Chybn<62> po<70>et parametrov proced<65>ry '%-.64s'",

View File

@ -106,7 +106,7 @@
"Campo Blob '%-.64s' no puede tener valores patron",
"Nombre de base de datos ilegal '%-.64s'",
"Nombre de tabla ilegal '%-.64s'",
"El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET OPTION SQL_BIG_SELECTS=1 si el SELECT esta correcto",
"El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto",
"Error desconocido",
"Procedimiento desconocido %s",
"Equivocado parametro count para procedimiento %s",

View File

@ -105,7 +105,7 @@
"BLOB f<>lt '%-.64s' kan inte ha ett DEFAULT-v<>rde",
"Felaktigt databasnamn '%-.64s'",
"Felaktigt tabellnamn '%-.64s'",
"Den angivna fr<66>gan skulle troligen ta mycket l<>ng tid! Kontrollera din WHERE och anv<6E>nd SET OPTION SQL_BIG_SELECTS=1 ifall du vill hantera stora joins",
"Den angivna fr<66>gan skulle l<EFBFBD>sa mer <20>n MAX_JOIN_SIZE rader. Kontrollera din WHERE och anv<6E>nd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins",
"Oidentifierat fel",
"Ok<4F>nd procedur: %s",
"Felaktigt antal parametrar till procedur %s",

View File

@ -110,7 +110,7 @@
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> BLOB '%-.64s' <20><> <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>",
"<22><>צ<EFBFBD><D7A6><EFBFBD> <20><>'<27> <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.100s'",
"<22><>צ<EFBFBD><D7A6><EFBFBD> <20><>'<27> <20><><EFBFBD><EFBFBD><EFBFBD>æ '%-.100s'",
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> SELECT <20><><EFBFBD>Ҧ<EFBFBD><D2A6><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD>Ӧ<EFBFBD>, <20><>, <20><><EFBFBD><EFBFBD><EFBFBD>, <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD>. <20><><EFBFBD><EFBFBD>צ<EFBFBD><D7A6><EFBFBD> <20><><EFBFBD><EFBFBD> WHERE <20><> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> SET OPTION SQL_BIG_SELECTS=1, <20><><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> SELECT <20> צ<><D7A6><EFBFBD><EFBFBD>",
"<22><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> SELECT <20><><EFBFBD>Ҧ<EFBFBD><D2A6><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD>Ӧ<EFBFBD>, <20><>, <20><><EFBFBD><EFBFBD><EFBFBD>, <20><><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD>. <20><><EFBFBD><EFBFBD>צ<EFBFBD><D7A6><EFBFBD> <20><><EFBFBD><EFBFBD> WHERE <20><> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> SET SQL_BIG_SELECTS=1, <20><><EFBFBD><EFBFBD> <20><><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD> SELECT <20> צ<><D7A6><EFBFBD><EFBFBD>",
"<22><>צ<EFBFBD><D7A6><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>",
"<22><>צ<EFBFBD><D7A6><EFBFBD><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.64s'",
"<22><><EFBFBD><EFBFBD><EFBFBD> ˦<><CBA6>˦<EFBFBD><CBA6><EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ҧ<EFBFBD> <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD> '%-.64s'",

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1829,7 +1829,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
{
ha_rows rec;
double tmp;
THD *thd= current_thd;
THD *thd= join->thd;
if (!rest_tables)
{
@ -1960,7 +1960,10 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
if (table->quick_keys & ((key_map) 1 << key))
records= (double) table->quick_rows[key];
else
records= (double) s->records/rec; // quick_range couldn't use key!
{
/* quick_range couldn't use key! */
records= (double) s->records/rec;
}
}
else
{
@ -4306,12 +4309,11 @@ free_tmp_table(THD *thd, TABLE *entry)
* If a HEAP table gets full, create a MyISAM table and copy all rows to it
*/
bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
bool ignore_last_dupp_key_error)
bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
int error, bool ignore_last_dupp_key_error)
{
TABLE new_table;
const char *save_proc_info;
THD *thd=current_thd;
int write_err;
DBUG_ENTER("create_myisam_from_heap");
@ -5318,7 +5320,8 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (error == HA_ERR_FOUND_DUPP_KEY ||
error == HA_ERR_FOUND_DUPP_UNIQUE)
goto end;
if (create_myisam_from_heap(table, &join->tmp_table_param, error,1))
if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
error,1))
DBUG_RETURN(-1); // Not a table_is_full error
table->uniques=0; // To ensure rows are the same
}
@ -5395,7 +5398,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
copy_funcs(join->tmp_table_param.items_to_copy);
if ((error=table->file->write_row(table->record[0])))
{
if (create_myisam_from_heap(table, &join->tmp_table_param, error, 0))
if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
error, 0))
DBUG_RETURN(-1); // Not a table_is_full error
/* Change method to update rows */
table->file->index_init(0);
@ -5489,8 +5493,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
if ((error=table->file->write_row(table->record[0])))
{
if (create_myisam_from_heap(table, &join->tmp_table_param,
error, 0))
if (create_myisam_from_heap(join->thd, table,
&join->tmp_table_param, error, 0))
DBUG_RETURN(-1); // Not a table_is_full error
}
else
@ -6019,7 +6023,7 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
int error;
ulong reclength,offset;
uint field_count;
THD *thd= current_thd;
THD *thd= join->thd;
DBUG_ENTER("remove_duplicates");
entry->reginfo.lock_type=TL_WRITE;
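
The common thread in these sql_select.cc hunks is dropping the implicit current_thd lookup in favour of an explicit THD pointer (join->thd, or a new THD* argument on create_myisam_from_heap). A minimal sketch of the pattern, using invented names rather than the server's real types:

    #include <cstdio>

    struct Context { const char *name; };          // stands in for THD

    thread_local Context *current_ctx = nullptr;   // stands in for current_thd

    // Before: the helper silently reaches into thread-local state.
    static void report_old() { std::printf("old: %s\n", current_ctx->name); }

    // After: the caller hands over the context it already owns.
    static void report_new(Context *ctx) { std::printf("new: %s\n", ctx->name); }

    int main()
    {
      Context ctx = { "session-1" };
      current_ctx = &ctx;     // only needed for the old style
      report_old();
      report_new(&ctx);       // explicit, and avoids a thread-local lookup
      return 0;
    }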

View File

@ -195,8 +195,8 @@ void count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields,
bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,List<Item> &fields);
void copy_fields(TMP_TABLE_PARAM *param);
void copy_funcs(Item **func_ptr);
bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
bool ignore_last_dupp_error);
bool create_myisam_from_heap(THD *Thd, TABLE *table, TMP_TABLE_PARAM *param,
int error, bool ignore_last_dupp_error);
/* functions from opt_sum.cc */
int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds);

View File

@ -262,7 +262,8 @@ bool select_union::send_data(List<Item> &values)
fill_record(table->field,values);
if ((write_record(table,&info)))
{
if (create_myisam_from_heap(table, tmp_table_param, info.last_errno, 0))
if (create_myisam_from_heap(thd, table, tmp_table_param, info.last_errno,
0))
return 1;
}
return 0;

View File

@ -66,7 +66,10 @@ int mysql_update(THD *thd,
TABLE *table;
SQL_SELECT *select;
READ_RECORD info;
TABLE_LIST tables;
List<Item> all_fields;
DBUG_ENTER("mysql_update");
LINT_INIT(used_index);
LINT_INIT(timestamp_query_id);
@ -80,8 +83,13 @@ int mysql_update(THD *thd,
table->quick_keys=0;
want_privilege=table->grant.want_privilege;
table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege);
if (setup_tables(table_list) || setup_conds(thd,table_list,&conds)
|| setup_ftfuncs(thd))
bzero((char*) &tables,sizeof(tables)); // For ORDER BY
tables.table = table;
if (setup_tables(table_list) || setup_conds(thd,table_list,&conds) ||
setup_order(thd, &tables, all_fields, all_fields, order) ||
setup_ftfuncs(thd))
DBUG_RETURN(-1); /* purecov: inspected */
old_used_keys=table->used_keys; // Keys used in WHERE
@ -159,13 +167,6 @@ int mysql_update(THD *thd,
matching rows before updating the table!
*/
table->file->extra(HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE);
IO_CACHE tempfile;
if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
DISK_BUFFER_SIZE, MYF(MY_WME)))
{
delete select; /* purecov: inspected */
DBUG_RETURN(-1);
}
if (old_used_keys & ((key_map) 1 << used_index))
{
table->key_read=1;
@ -174,81 +175,97 @@ int mysql_update(THD *thd,
if (order)
{
/*
Doing an ORDER BY; Let filesort find and sort the rows we are going
to update
*/
uint length;
SORT_FIELD *sortorder;
TABLE_LIST tables;
List<Item> fields;
List<Item> all_fields;
ha_rows examined_rows;
bzero((char*) &tables,sizeof(tables));
tables.table = table;
table->io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
MYF(MY_FAE | MY_ZEROFILL));
if (setup_order(thd, &tables, fields, all_fields, order) ||
!(sortorder=make_unireg_sortorder(order, &length)) ||
if (!(sortorder=make_unireg_sortorder(order, &length)) ||
(table->found_records = filesort(table, sortorder, length,
(SQL_SELECT *) 0, 0L,
HA_POS_ERROR, &examined_rows))
== HA_POS_ERROR)
select, 0L,
limit, &examined_rows)) ==
HA_POS_ERROR)
{
delete select;
free_io_cache(table);
DBUG_RETURN(-1);
}
/*
Filesort has already found and selected the rows we want to update,
so we don't need the where clause
*/
delete select;
select= 0;
}
else
{
/*
We are doing a search on a key that is updated. In this case
we go through the matching rows, save a pointer to them and
update these in a separate loop based on the pointer.
*/
IO_CACHE tempfile;
if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
DISK_BUFFER_SIZE, MYF(MY_WME)))
{
delete select; /* purecov: inspected */
DBUG_RETURN(-1);
}
init_read_record(&info,thd,table,select,0,1);
thd->proc_info="Searching rows for update";
uint tmp_limit= limit;
while (!(error=info.read_record(&info)) && !thd->killed)
{
if (!(select && select->skipp_record()))
{
table->file->position(table->record[0]);
if (my_b_write(&tempfile,table->file->ref,
table->file->ref_length))
{
error=1; /* purecov: inspected */
break; /* purecov: inspected */
}
if (!--limit && using_limit)
break;
}
}
end_read_record(&info);
/* Change select to use tempfile */
if (select)
{
delete select->quick;
if (select->free_cond)
delete select->cond;
select->quick=0;
select->cond=0;
}
else
{
select= new SQL_SELECT;
select->head=table;
}
if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
error=1; /* purecov: inspected */
select->file=tempfile; // Read row ptrs from this file
if (error >= 0)
{
delete select;
DBUG_RETURN(-1);
}
}
init_read_record(&info,thd,table,select,0,1);
thd->proc_info="Searching rows for update";
while (!(error=info.read_record(&info)) && !thd->killed)
{
if (!(select && select->skipp_record()))
{
table->file->position(table->record[0]);
if (my_b_write(&tempfile,table->file->ref,
table->file->ref_length))
{
error=1; /* purecov: inspected */
break; /* purecov: inspected */
}
}
else
{
if (!(test_flags & 512)) /* For debugging */
{
DBUG_DUMP("record",(char*) table->record[0],table->reclength);
}
}
}
end_read_record(&info);
if (table->key_read)
{
table->key_read=0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
/* Change select to use tempfile */
if (select)
{
delete select->quick;
if (select->free_cond)
delete select->cond;
select->quick=0;
select->cond=0;
}
else
{
select= new SQL_SELECT;
select->head=table;
}
if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
error=1; /* purecov: inspected */
select->file=tempfile; // Read row ptrs from this file
if (error >= 0)
{
delete select;
DBUG_RETURN(-1);
}
}
if (handle_duplicates == DUP_IGNORE)
@ -275,11 +292,6 @@ int mysql_update(THD *thd,
(byte*) table->record[0])))
{
updated++;
if (!--limit && using_limit)
{
error= -1;
break;
}
}
else if (handle_duplicates != DUP_IGNORE ||
error != HA_ERR_FOUND_DUPP_KEY)
@ -289,11 +301,17 @@ int mysql_update(THD *thd,
break;
}
}
if (!--limit && using_limit)
{
error= -1; // Simulate end of file
break;
}
}
else
table->file->unlock_row();
}
end_read_record(&info);
free_io_cache(table); // If ORDER BY
thd->proc_info="end";
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
transactional_table= table->file->has_transactions();
@ -741,7 +759,8 @@ bool multi_update::send_data(List<Item> &not_used_values)
(error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE))
{
if (create_myisam_from_heap(table, tmp_table_param + offset, error, 1))
if (create_myisam_from_heap(thd, table, tmp_table_param + offset,
error, 1))
{
do_update=0;
DBUG_RETURN(1); // Not a table_is_full error
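
The rewritten mysql_update() keeps two strategies: with ORDER BY, filesort pre-selects (and limits) the rows so the WHERE clause can be dropped afterwards; otherwise, when the scanned key is itself being updated, the first pass only writes row pointers to a tempfile and a second pass applies the changes. A small sketch of that second, two-pass idea — invented names, with a vector standing in for the IO_CACHE tempfile:

    #include <cstdio>
    #include <vector>

    struct Row { int key; int value; };

    // Pass 1: remember positions of matching rows without modifying anything.
    // Pass 2: update through the saved positions, so changing the key cannot
    // make the scan revisit or skip rows.
    static void two_pass_update(std::vector<Row> &rows, int match_key, int new_key)
    {
      std::vector<size_t> positions;               // stands in for the tempfile
      for (size_t i = 0; i < rows.size(); i++)
        if (rows[i].key == match_key)
          positions.push_back(i);                  // save a "row pointer"

      for (size_t pos : positions)                 // separate update loop
        rows[pos].key = new_key;
    }

    int main()
    {
      std::vector<Row> rows = { {1, 10}, {2, 20}, {1, 30} };
      two_pass_update(rows, 1, 2);                 // UPDATE ... SET key=2 WHERE key=1
      for (const Row &r : rows)
        std::printf("%d %d\n", r.key, r.value);
      return 0;
    }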

View File

@ -492,7 +492,7 @@ static uchar* thai2sortable(const uchar * tstr,uint len)
}
pLeft4 = pRight4;*/
while(len--) {
if(isldvowel(*p) && isconsnt(p[1])) {
if(isldvowel(*p) && len > 0 && isconsnt(p[1])) {
*pRight1++ = t_ctype[p[1]][0];
*pRight2++ = t_ctype[p[1]][1];
*pRight3++ = t_ctype[p[1]][2];
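
The change above only reads p[1] when at least one more byte remains (len > 0), so a string ending in a leading vowel no longer reads past the buffer. A tiny standalone illustration of the same guard, with made-up classifier functions instead of the real t_ctype tables:

    #include <cstdio>

    // Pretend 'V' marks a leading vowel and lowercase letters are consonants.
    static bool is_leading_vowel(unsigned char c) { return c == 'V'; }
    static bool is_consonant(unsigned char c)     { return c >= 'a' && c <= 'z'; }

    static void scan(const unsigned char *p, unsigned len)
    {
      while (len--)
      {
        // Without "len > 0" the final 'V' would make us read one byte past the end.
        if (is_leading_vowel(*p) && len > 0 && is_consonant(p[1]))
          std::printf("swap %c%c\n", p[1], p[0]);
        p++;
      }
    }

    int main()
    {
      const unsigned char buf[] = { 'V', 'k', 'V' };  // ends in a leading vowel
      scan(buf, sizeof(buf));                         // prints "swap kV", skips the last 'V'
      return 0;
    }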

View File

@ -33,6 +33,7 @@ sort_buffer_size = 2M
read_buffer_size = 2M
myisam_sort_buffer_size = 64M
thread_cache = 8
query_cache_size = 32M
# Try number of CPU's*2 for thread_concurrency
thread_concurrency = 8

View File

@ -33,6 +33,7 @@ sort_buffer_size = 1M
read_buffer_size = 1M
myisam_sort_buffer_size = 64M
thread_cache = 8
query_cache_size= 16M
# Try number of CPU's*2 for thread_concurrency
thread_concurrency = 8