
Merge bk-internal.mysql.com:/home/bk/mysql-4.1

into mashka.mysql.fi:/home/my/mysql-4.1
monty@mashka.mysql.fi
2003-10-15 22:52:31 +03:00
44 changed files with 484 additions and 128 deletions

View File

@@ -515,6 +515,7 @@ scripts/fill_func_tables.sql
scripts/fill_help_tables
scripts/fill_help_tables.sql
scripts/make_binary_distribution
scripts/make_sharedlib_distribution
scripts/make_win_src_distribution
scripts/msql2mysql
scripts/mysql_config

View File

@@ -1821,7 +1821,7 @@ AC_CHECK_FUNCS(alarm bmove \
cuserid fcntl fconvert poll \
getrusage getpwuid getcwd getrlimit getwd index stpcpy locking longjmp \
perror pread realpath readlink rename \
socket strnlen madvise mkstemp \
socket strnlen madvise mallinfo mkstemp \
strtol strtoul strtoll strtoull snprintf tempnam thr_setconcurrency \
gethostbyaddr_r gethostbyname_r getpwnam \
bfill bzero bcmp strstr strpbrk strerror \

View File

@@ -49,20 +49,24 @@
/* The following is parameter to ha_rkey() how to use key */
/* We define a complete-field prefix of a key value as a prefix where the
last included field in the prefix contains the full field, not just some bytes
from the start of the field. A partial-field prefix is allowed to
contain only a few first bytes from the last included field.
/*
We define a complete-field prefix of a key value as a prefix where
the last included field in the prefix contains the full field, not
just some bytes from the start of the field. A partial-field prefix
is allowed to contain only a few first bytes from the last included
field.
Below HA_READ_KEY_EXACT, ..., HA_READ_BEFORE_KEY can take a
complete-field prefix of a key value as the search key. HA_READ_PREFIX
and HA_READ_PREFIX_LAST could also take a partial-field prefix, but
currently (4.0.10) they are only used with complete-field prefixes. MySQL uses
a padding trick to implement LIKE 'abc%' queries.
Below HA_READ_KEY_EXACT, ..., HA_READ_BEFORE_KEY can take a
complete-field prefix of a key value as the search
key. HA_READ_PREFIX and HA_READ_PREFIX_LAST could also take a
partial-field prefix, but currently (4.0.10) they are only used with
complete-field prefixes. MySQL uses a padding trick to implement
LIKE 'abc%' queries.
NOTE that in InnoDB HA_READ_PREFIX_LAST will NOT work with a partial-field
prefix because InnoDB currently strips spaces from the end of varchar
fields! */
NOTE that in InnoDB HA_READ_PREFIX_LAST will NOT work with a
partial-field prefix because InnoDB currently strips spaces from the
end of varchar fields!
*/
enum ha_rkey_function {
HA_READ_KEY_EXACT, /* Find first record else error */
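For illustration (the table and column names here are hypothetical, not taken from the diff), the padding trick mentioned in the comment above means that a query such as

SELECT * FROM t1 WHERE name LIKE 'abc%';

can be executed as a range read whose lower bound is 'abc' padded with the lowest-sorting characters and whose upper bound is 'abc' padded with the highest, so the handler only ever receives complete-field prefixes through ha_rkey().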

View File

@@ -52,9 +52,9 @@ void STDCALL cli_mysql_close(MYSQL *mysql);
MYSQL_FIELD * STDCALL cli_list_fields(MYSQL *mysql);
my_bool STDCALL cli_read_prepare_result(MYSQL *mysql, MYSQL_STMT *stmt);
MYSQL_DATA *cli_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
uint fields);
MYSQL_DATA * STDCALL cli_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
uint fields);
int STDCALL cli_stmt_execute(MYSQL_STMT *stmt);
MYSQL_DATA *cli_read_binary_rows(MYSQL_STMT *stmt);
MYSQL_DATA * STDCALL cli_read_binary_rows(MYSQL_STMT *stmt);
int STDCALL cli_unbuffered_fetch(MYSQL *mysql, char **row);
const char * STDCALL cli_read_statistic(MYSQL *mysql);

View File

@@ -3098,7 +3098,7 @@ no_data:
Read all rows of data from server (binary format)
*/
MYSQL_DATA *cli_read_binary_rows(MYSQL_STMT *stmt)
MYSQL_DATA * STDCALL cli_read_binary_rows(MYSQL_STMT *stmt)
{
ulong pkt_len;
uchar *cp;

View File

@@ -81,7 +81,7 @@ EXPORTS
mysql_param_result
mysql_ping
mysql_prepare
mysql_prepare_result
mysql_get_metadata
mysql_query
mysql_read_query_result
mysql_real_connect

View File

@@ -1,3 +1,4 @@
drop table if exists t1;
CREATE TABLE t1 SELECT _utf8'test' as c1, _utf8'тест' as c2;
SHOW CREATE TABLE t1;
Table Create Table
@@ -15,6 +16,10 @@ t1 CREATE TABLE `t1` (
`c3` char(4) character set utf8 default NULL
) TYPE=MyISAM CHARSET=latin1
INSERT INTO t1 VALUES ('aaaabbbbccccdddd','aaaabbbbccccdddd','aaaabbbbccccdddd');
Warnings:
Warning 1264 Data truncated for column 'c1' at row 1
Warning 1264 Data truncated for column 'c2' at row 1
Warning 1264 Data truncated for column 'c3' at row 1
SELECT * FROM t1;
c1 c2 c3
aaaabbbbcccc aaaabbbbcccc aaaabbbbcccc

View File

@@ -75,6 +75,8 @@ NULL this is null
drop table t1;
CREATE TABLE t1 (a varchar(16) NOT NULL, b smallint(6) NOT NULL, c datetime NOT NULL, d smallint(6) NOT NULL);
INSERT INTO t1 SET a = "", d= "2003-01-14 03:54:55";
Warnings:
Warning 1264 Data truncated for column 'd' at row 1
UPDATE t1 SET d=1/NULL;
Warnings:
Warning 1264 Data truncated for column 'd' at row 1

View File

@@ -136,7 +136,7 @@ email104
email105
email106
email107
INSERT INTO `t1` (`id`, `kid`) VALUES ('', '150');
INSERT INTO `t1` (`id`, `kid`) VALUES ('0', '150');
SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;
email
email1

View File

@@ -163,6 +163,38 @@ set @@rand_seed1=10000000,@@rand_seed2=1000000;
select ROUND(RAND(),5);
ROUND(RAND(),5)
0.02887
show variables like '%alloc%';
Variable_name Value
query_alloc_block_size 8192
query_prealloc_size 8192
range_alloc_block_size 2048
transaction_alloc_block_size 8192
transaction_prealloc_size 4096
set @@range_alloc_block_size=1024*16;
set @@query_alloc_block_size=1024*17+2;
set @@query_prealloc_size=1024*18;
set @@transaction_alloc_block_size=1024*20-1;
set @@transaction_prealloc_size=1024*21-1;
select @@query_alloc_block_size;
@@query_alloc_block_size
17408
show variables like '%alloc%';
Variable_name Value
query_alloc_block_size 17408
query_prealloc_size 18432
range_alloc_block_size 16384
transaction_alloc_block_size 19456
transaction_prealloc_size 20480
set @@range_alloc_block_size=default;
set @@query_alloc_block_size=default, @@query_prealloc_size=default;
set transaction_alloc_block_size=default, @@transaction_prealloc_size=default;
show variables like '%alloc%';
Variable_name Value
query_alloc_block_size 8192
query_prealloc_size 8192
range_alloc_block_size 2048
transaction_alloc_block_size 8192
transaction_prealloc_size 4096
set big_tables=OFFF;
ERROR 42000: Variable 'big_tables' can't be set to the value of 'OFFF'
set big_tables="OFFF";
@@ -202,6 +234,8 @@ set myisam_max_sort_file_size=100;
ERROR HY000: Variable 'myisam_max_sort_file_size' is a GLOBAL variable and should be set with SET GLOBAL
set myisam_max_extra_sort_file_size=100;
ERROR HY000: Variable 'myisam_max_extra_sort_file_size' is a GLOBAL variable and should be set with SET GLOBAL
set @@SQL_WARNINGS=NULL;
ERROR 42000: Variable 'sql_warnings' can't be set to the value of 'NULL'
set autocommit=1;
set big_tables=1;
select @@autocommit, @@big_tables;

View File

@@ -92,6 +92,8 @@ Warning 1264 Data truncated for column 'b' at row 3
Warning 1262 Data truncated, NULL supplied to NOT NULL column 'a' at row 4
Warning 1264 Data truncated for column 'b' at row 4
insert into t2(b) values('mysqlab');
Warnings:
Warning 1264 Data truncated for column 'b' at row 1
set sql_warnings=1;
insert into t2(b) values('mysqlab');
Warnings:

View File

@@ -1,3 +1,10 @@
#
# Test of alter table
#
--disable_warnings
drop table if exists t1;
--enable_warnings
CREATE TABLE t1 SELECT _utf8'test' as c1, _utf8'тест' as c2;
SHOW CREATE TABLE t1;
DELETE FROM t1;

View File

@@ -34,6 +34,7 @@ create table t1 (a tinyint not null auto_increment, b blob not null, primary key
let $1=100;
disable_query_log;
--disable_warnings
SET SQL_WARNINGS=0;
while ($1)
{
@@ -41,6 +42,7 @@ while ($1)
dec $1;
}
SET SQL_WARNINGS=1;
--enable_warnings
enable_query_log;
check table t1;
repair table t1;

View File

@@ -64,7 +64,7 @@ SELECT FOUND_ROWS();
SELECT DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;
SELECT DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL ORDER BY email LIMIT 10;
INSERT INTO `t1` (`id`, `kid`) VALUES ('', '150');
INSERT INTO `t1` (`id`, `kid`) VALUES ('0', '150');
SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;
SELECT FOUND_ROWS();

View File

@@ -98,6 +98,18 @@ select @@timestamp>0;
set @@rand_seed1=10000000,@@rand_seed2=1000000;
select ROUND(RAND(),5);
show variables like '%alloc%';
set @@range_alloc_block_size=1024*16;
set @@query_alloc_block_size=1024*17+2;
set @@query_prealloc_size=1024*18;
set @@transaction_alloc_block_size=1024*20-1;
set @@transaction_prealloc_size=1024*21-1;
select @@query_alloc_block_size;
show variables like '%alloc%';
set @@range_alloc_block_size=default;
set @@query_alloc_block_size=default, @@query_prealloc_size=default;
set transaction_alloc_block_size=default, @@transaction_prealloc_size=default;
show variables like '%alloc%';
# The following should give errors
@@ -138,6 +150,8 @@ select @@global.sql_auto_is_null;
set myisam_max_sort_file_size=100;
--error 1229
set myisam_max_extra_sort_file_size=100;
--error 1231
set @@SQL_WARNINGS=NULL;
# Test setting all variables

View File

@@ -37,6 +37,7 @@ bin_SCRIPTS = @server_scripts@ \
mysql_create_system_tables
EXTRA_SCRIPTS = make_binary_distribution.sh \
make_sharedlib_distribution.sh \
make_win_src_distribution.sh \
msql2mysql.sh \
mysql_config.sh \
@@ -69,6 +70,7 @@ dist_pkgdata_DATA = fill_help_tables.sql mysql_fix_privilege_tables.sql
# failures with it.
CLEANFILES = @server_scripts@ \
make_binary_distribution \
make_sharedlib_distribution \
msql2mysql \
mysql_config \
mysql_fix_privilege_tables \
@@ -141,7 +143,7 @@ SUFFIXES = .sh
# Don't update the files from bitkeeper
%::SCCS/s.%
all: fill_help_tables.sql make_win_src_distribution make_binary_distribution
all: fill_help_tables.sql make_win_src_distribution make_binary_distribution make_sharedlib_distribution
fill_help_tables.sql: fill_help_tables ../Docs/manual.texi
./fill_help_tables < ../Docs/manual.texi > fill_help_tables.sql

View File

@@ -0,0 +1,117 @@
#!/bin/sh
# The default path should be /usr/local
# Get some info from configure
# chmod +x ./scripts/setsomevars
machine=@MACHINE_TYPE@
system=@SYSTEM_TYPE@
version=@VERSION@
export machine system version
SOURCE=`pwd`
CP="cp -p"
MV="mv"
STRIP=1
DEBUG=0
SILENT=0
TMP=/tmp
SUFFIX=""
parse_arguments() {
for arg do
case "$arg" in
--debug) DEBUG=1;;
--tmp=*) TMP=`echo "$arg" | sed -e "s;--tmp=;;"` ;;
--suffix=*) SUFFIX=`echo "$arg" | sed -e "s;--suffix=;;"` ;;
--no-strip) STRIP=0 ;;
--silent) SILENT=1 ;;
*)
echo "Unknown argument '$arg'"
exit 1
;;
esac
done
}
parse_arguments "$@"
BASE=$TMP/my_dist$SUFFIX
if [ -d $BASE ] ; then
rm -r -f $BASE
fi
mkdir -p $BASE/lib
for i in \
libmysql/.libs/libmysqlclient.so* \
libmysql_r/.libs/libmysqlclient_r.so*
do
if [ -f $i ]
then
$CP $i $BASE/lib
fi
done
# Change the distribution to a long descriptive name
NEW_NAME=mysql-shared-$version-$system-$machine$SUFFIX
BASE2=$TMP/$NEW_NAME
rm -r -f $BASE2
mv $BASE $BASE2
BASE=$BASE2
#if we are debugging, do not do tar/gz
if [ x$DEBUG = x1 ] ; then
exit
fi
# This is needed to prefer GNU tar instead of tar because tar can't
# always handle long filenames
PATH_DIRS=`echo $PATH | sed -e 's/^:/. /' -e 's/:$/ ./' -e 's/::/ . /g' -e 's/:/ /g' `
which_1 ()
{
for cmd
do
for d in $PATH_DIRS
do
for file in $d/$cmd
do
if test -x $file -a ! -d $file
then
echo $file
exit 0
fi
done
done
done
exit 1
}
#
# Create the result tar file
#
tar=`which_1 gnutar gtar`
if test "$?" = "1" -o "$tar" = ""
then
tar=tar
fi
echo "Using $tar to create archive"
cd $TMP
OPT=cvf
if [ x$SILENT = x1 ] ; then
OPT=cf
fi
$tar $OPT $SOURCE/$NEW_NAME.tar $NEW_NAME
cd $SOURCE
echo "Compressing archive"
gzip -9 $NEW_NAME.tar
echo "Removing temporary directory"
rm -r -f $BASE
echo "$NEW_NAME.tar.gz created"

View File

@@ -1137,8 +1137,8 @@ unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
/* Read all rows (fields or data) from server */
MYSQL_DATA *cli_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
unsigned int fields)
MYSQL_DATA * STDCALL cli_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
unsigned int fields)
{
uint field;
ulong pkt_len;

View File

@@ -119,7 +119,7 @@ set_field_to_null(Field *field)
return 0;
}
field->reset();
if (current_thd->count_cuted_fields)
if (current_thd->count_cuted_fields == CHECK_FIELD_WARN)
{
field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,ER_WARN_DATA_TRUNCATED);
return 0;
@@ -176,7 +176,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions)
field->table->auto_increment_field_not_null= false;
return 0; // field is set in handler.cc
}
if (current_thd->count_cuted_fields)
if (current_thd->count_cuted_fields == CHECK_FIELD_WARN)
{
field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,ER_WARN_NULL_TO_NOTNULL);
return 0;

View File

@@ -238,7 +238,8 @@ int berkeley_show_logs(Protocol *protocol)
MEM_ROOT *old_root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC);
DBUG_ENTER("berkeley_show_logs");
init_alloc_root(&show_logs_root, 1024, 1024);
init_sql_alloc(&show_logs_root, BDB_LOG_ALLOC_BLOCK_SIZE,
BDB_LOG_ALLOC_BLOCK_SIZE);
my_pthread_setspecific_ptr(THR_MALLOC,&show_logs_root);
if ((error= db_env->log_archive(db_env, &all_logs,

View File

@@ -2200,6 +2200,9 @@ double user_var_entry::val(my_bool *null_value)
return (double) *(longlong*) value;
case STRING_RESULT:
return atof(value); // This is null terminated
case ROW_RESULT:
DBUG_ASSERT(1); // Impossible
break;
}
return 0.0; // Impossible
}
@@ -2219,6 +2222,9 @@ longlong user_var_entry::val_int(my_bool *null_value)
return *(longlong*) value;
case STRING_RESULT:
return strtoull(value,NULL,10); // String is null terminated
case ROW_RESULT:
DBUG_ASSERT(1); // Impossible
break;
}
return LL(0); // Impossible
}
@@ -2242,6 +2248,9 @@ String *user_var_entry::val_str(my_bool *null_value, String *str,
case STRING_RESULT:
if (str->copy(value, length, collation.collation))
str= 0; // EOM error
case ROW_RESULT:
DBUG_ASSERT(1); // Impossible
break;
}
return(str);
}

View File

@@ -33,7 +33,9 @@
#include "md5.h"
#include "sha1.h"
#include "my_aes.h"
C_MODE_START
#include "../mysys/my_static.h" // For soundex_map
C_MODE_END
String my_empty_string("",default_charset_info);

View File

@@ -85,6 +85,19 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset;
#define MYSQLD_NET_RETRY_COUNT 10 // Abort read after this many int.
#endif
#define TEMP_POOL_SIZE 128
#define QUERY_ALLOC_BLOCK_SIZE 8192
#define QUERY_ALLOC_PREALLOC_SIZE 8192
#define TRANS_ALLOC_BLOCK_SIZE 4096
#define TRANS_ALLOC_PREALLOC_SIZE 4096
#define RANGE_ALLOC_BLOCK_SIZE 2048
#define ACL_ALLOC_BLOCK_SIZE 1024
#define UDF_ALLOC_BLOCK_SIZE 1024
#define TABLE_ALLOC_BLOCK_SIZE 1024
#define BDB_LOG_ALLOC_BLOCK_SIZE 1024
#define WARN_ALLOC_BLOCK_SIZE 2048
#define WARN_ALLOC_PREALLOC_SIZE 1024
/*
The following parameters is to decide when to use an extra cache to
optimise seeks when reading a big table in sorted order

View File

@@ -3567,11 +3567,14 @@ enum options
OPT_BDB_LOG_BUFFER_SIZE,
OPT_BDB_MAX_LOCK,
OPT_ERROR_LOG_FILE,
OPT_DEFAULT_WEEK_FORMAT,
OPT_RANGE_ALLOC_BLOCK_SIZE,
OPT_QUERY_ALLOC_BLOCK_SIZE, OPT_QUERY_PREALLOC_SIZE,
OPT_TRANS_ALLOC_BLOCK_SIZE, OPT_TRANS_PREALLOC_SIZE,
OPT_ENABLE_SHARED_MEMORY,
OPT_SHARED_MEMORY_BASE_NAME,
OPT_OLD_PASSWORDS,
OPT_EXPIRE_LOGS_DAYS,
OPT_DEFAULT_WEEK_FORMAT,
OPT_GROUP_CONCAT_MAX_LEN,
OPT_DEFAULT_COLLATION,
OPT_SECURE_AUTH,
@@ -4392,6 +4395,11 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.preload_buff_size,
(gptr*) &max_system_variables.preload_buff_size, 0, GET_ULONG,
REQUIRED_ARG, 32*1024L, 1024, 1024*1024*1024L, 0, 1, 0},
{"query_alloc_block_size", OPT_QUERY_ALLOC_BLOCK_SIZE,
"Allocation block size for query parsing and execution",
(gptr*) &global_system_variables.query_alloc_block_size,
(gptr*) &max_system_variables.query_alloc_block_size, 0, GET_ULONG,
REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0},
#ifdef HAVE_QUERY_CACHE
{"query_cache_limit", OPT_QUERY_CACHE_LIMIT,
"Don't cache results that are bigger than this.",
@@ -4413,6 +4421,11 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.query_cache_type,
(gptr*) &max_system_variables.query_cache_type,
0, GET_ULONG, REQUIRED_ARG, 1, 0, 2, 0, 1, 0},
{"query_prealloc_size", OPT_QUERY_PREALLOC_SIZE,
"Persistent buffer for query parsing and execution",
(gptr*) &global_system_variables.query_prealloc_size,
(gptr*) &max_system_variables.query_prealloc_size, 0, GET_ULONG,
REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0},
#endif /*HAVE_QUERY_CACHE*/
{"read_buffer_size", OPT_RECORD_BUFFER,
"Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value.",
@@ -4451,6 +4464,11 @@ The minimum value for this variable is 4096.",
(gptr*) &slave_net_timeout, (gptr*) &slave_net_timeout, 0,
GET_ULONG, REQUIRED_ARG, SLAVE_NET_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
#endif /* HAVE_REPLICATION */
{"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE,
"Allocation block size for storing ranges during optimization",
(gptr*) &global_system_variables.range_alloc_block_size,
(gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG,
REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0},
{"read-only", OPT_READONLY,
"Make all tables readonly, with the expections for replications (slave) threads and users with the SUPER privilege.",
(gptr*) &opt_readonly,
@@ -4487,6 +4505,16 @@ The minimum value for this variable is 4096.",
"The stack size for each thread.", (gptr*) &thread_stack,
(gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK,
1024*32, ~0L, 0, 1024, 0},
{"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE,
"Allocation block size for transactions to be stored in binary log",
(gptr*) &global_system_variables.trans_alloc_block_size,
(gptr*) &max_system_variables.trans_alloc_block_size, 0, GET_ULONG,
REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0},
{"transaction_prealloc_size", OPT_TRANS_PREALLOC_SIZE,
"Persistent buffer for transactions to be stored in binary log",
(gptr*) &global_system_variables.trans_prealloc_size,
(gptr*) &max_system_variables.trans_prealloc_size, 0, GET_ULONG,
REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0},
{"wait_timeout", OPT_WAIT_TIMEOUT,
"The number of seconds the server waits for activity on a connection before closing it.",
(gptr*) &global_system_variables.net_wait_timeout,

View File

@@ -26,11 +26,11 @@
** Create a FT or QUICK RANGE based on a key
****************************************************************************/
QUICK_SELECT *get_ft_or_quick_select_for_ref(TABLE *table, JOIN_TAB *tab)
QUICK_SELECT *get_ft_or_quick_select_for_ref(THD *thd, TABLE *table,
JOIN_TAB *tab)
{
if (tab->type == JT_FT)
return new FT_SELECT(table, &tab->ref);
else
return get_quick_select_for_ref(table, &tab->ref);
return new FT_SELECT(thd, table, &tab->ref);
return get_quick_select_for_ref(thd, table, &tab->ref);
}

View File

@@ -28,13 +28,14 @@ class FT_SELECT: public QUICK_SELECT {
public:
TABLE_REF *ref;
FT_SELECT(TABLE *table, TABLE_REF *tref) :
QUICK_SELECT (table,tref->key,1), ref(tref) { init(); }
FT_SELECT(THD *thd, TABLE *table, TABLE_REF *tref) :
QUICK_SELECT (thd, table, tref->key, 1), ref(tref) { init(); }
int init() { return error=file->ft_init(); }
int get_next() { return error=file->ft_read(record); }
};
QUICK_SELECT *get_ft_or_quick_select_for_ref(TABLE *table, JOIN_TAB *tab);
QUICK_SELECT *get_ft_or_quick_select_for_ref(THD *thd, TABLE *table,
JOIN_TAB *tab);
#endif

View File

@@ -279,6 +279,7 @@ public:
typedef struct st_qsel_param {
THD *thd;
TABLE *table;
KEY_PART *key_parts,*key_parts_end,*key[MAX_KEY];
MEM_ROOT *mem_root;
@@ -378,13 +379,14 @@ SQL_SELECT::~SQL_SELECT()
#undef index // Fix for Unixware 7
QUICK_SELECT::QUICK_SELECT(TABLE *table,uint key_nr,bool no_alloc)
QUICK_SELECT::QUICK_SELECT(THD *thd, TABLE *table, uint key_nr, bool no_alloc)
:dont_free(0),error(0),index(key_nr),max_used_key_length(0),
used_key_parts(0), head(table), it(ranges),range(0)
{
if (!no_alloc)
{
init_sql_alloc(&alloc,1024,0); // Allocates everything here
// Allocates everything through the internal memroot
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
my_pthread_setspecific_ptr(THR_MALLOC,&alloc);
}
else
@@ -456,17 +458,17 @@ SEL_ARG *SEL_ARG::clone(SEL_ARG *new_parent,SEL_ARG **next_arg)
SEL_ARG *tmp;
if (type != KEY_RANGE)
{
if (!(tmp=new SEL_ARG(type)))
return 0; // out of memory
if (!(tmp= new SEL_ARG(type)))
return 0; // out of memory
tmp->prev= *next_arg; // Link into next/prev chain
(*next_arg)->next=tmp;
(*next_arg)= tmp;
}
else
{
if (!(tmp=new SEL_ARG(field,part, min_value,max_value,
min_flag, max_flag, maybe_flag)))
return 0; // out of memory
if (!(tmp= new SEL_ARG(field,part, min_value,max_value,
min_flag, max_flag, maybe_flag)))
return 0; // OOM
tmp->parent=new_parent;
tmp->next_key_part=next_key_part;
if (left != &null_element)
@@ -477,7 +479,8 @@ SEL_ARG *SEL_ARG::clone(SEL_ARG *new_parent,SEL_ARG **next_arg)
(*next_arg)= tmp;
if (right != &null_element)
tmp->right=right->clone(tmp,next_arg);
if (!(tmp->right= right->clone(tmp,next_arg)))
return 0; // OOM
}
increment_use_count(1);
return tmp;
@@ -556,10 +559,11 @@ SEL_ARG *SEL_ARG::clone_tree()
{
SEL_ARG tmp_link,*next_arg,*root;
next_arg= &tmp_link;
root=clone((SEL_ARG *) 0, &next_arg);
root= clone((SEL_ARG *) 0, &next_arg);
next_arg->next=0; // Fix last link
tmp_link.next->prev=0; // Fix first link
root->use_count=0;
if (root) // If not OOM
root->use_count= 0;
return root;
}
@@ -577,7 +581,8 @@ SEL_ARG *SEL_ARG::clone_tree()
** quick_rows ; How many rows the key matches
*****************************************************************************/
int SQL_SELECT::test_quick_select(key_map keys_to_use, table_map prev_tables,
int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
table_map prev_tables,
ha_rows limit, bool force_quick_range)
{
uint basflag;
@@ -618,9 +623,9 @@ int SQL_SELECT::test_quick_select(key_map keys_to_use, table_map prev_tables,
SEL_TREE *tree;
KEY_PART *key_parts;
PARAM param;
THD *thd= current_thd;
/* set up parameter that is passed to all functions */
param.thd= thd;
param.baseflag=basflag;
param.prev_tables=prev_tables | const_tables;
param.read_tables=read_tables;
@@ -630,7 +635,7 @@ int SQL_SELECT::test_quick_select(key_map keys_to_use, table_map prev_tables,
param.mem_root= &alloc;
thd->no_errors=1; // Don't warn about NULL
init_sql_alloc(&alloc,2048,0);
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
if (!(param.key_parts = (KEY_PART*) alloc_root(&alloc,
sizeof(KEY_PART)*
head->key_parts)))
@@ -765,7 +770,7 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond)
while ((item=li++))
{
SEL_TREE *new_tree=get_mm_tree(param,item);
if (current_thd->is_fatal_error)
if (param->thd->is_fatal_error)
DBUG_RETURN(0); // out of memory
tree=tree_and(param,tree,new_tree);
if (tree && tree->type == SEL_TREE::IMPOSSIBLE)
@@ -906,7 +911,7 @@ get_mm_parts(PARAM *param, Field *field, Item_func::Functype type,
{
SEL_ARG *sel_arg=0;
if (!tree && !(tree=new SEL_TREE()))
DBUG_RETURN(0); // out of memory
DBUG_RETURN(0); // OOM
if (!value || !(value->used_tables() & ~param->read_tables))
{
sel_arg=get_mm_leaf(param,key_part->field,key_part,type,value);
@@ -918,10 +923,11 @@ get_mm_parts(PARAM *param, Field *field, Item_func::Functype type,
DBUG_RETURN(tree);
}
}
else {
else
{
// This key may be used later
if (!(sel_arg=new SEL_ARG(SEL_ARG::MAYBE_KEY)))
DBUG_RETURN(0); // out of memory
if (!(sel_arg= new SEL_ARG(SEL_ARG::MAYBE_KEY)))
DBUG_RETURN(0); // OOM
}
sel_arg->part=(uchar) key_part->part;
tree->keys[key_part->key]=sel_add(tree->keys[key_part->key],sel_arg);
@@ -1126,8 +1132,8 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part,
******************************************************************************/
/*
** Add a new key test to a key when scanning through all keys
** This will never be called for same key parts.
Add a new key test to a key when scanning through all keys
This will never be called for same key parts.
*/
static SEL_ARG *
@@ -1311,7 +1317,8 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag)
// key1->part < key2->part
key1->use_count--;
if (key1->use_count > 0)
key1=key1->clone_tree();
if (!(key1= key1->clone_tree()))
return 0; // OOM
return and_all_keys(key1,key2,clone_flag);
}
@@ -1330,7 +1337,8 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag)
if (key1->use_count > 1)
{
key1->use_count--;
key1=key1->clone_tree();
if (!(key1=key1->clone_tree()))
return 0; // OOM
key1->use_count++;
}
if (key1->type == SEL_ARG::MAYBE_KEY)
@@ -1374,6 +1382,8 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag)
if (!next || next->type != SEL_ARG::IMPOSSIBLE)
{
SEL_ARG *new_arg= e1->clone_and(e2);
if (!new_arg)
return &null_element; // End of memory
new_arg->next_key_part=next;
if (!new_tree)
{
@@ -1461,8 +1471,8 @@ key_or(SEL_ARG *key1,SEL_ARG *key2)
{
swap(SEL_ARG *,key1,key2);
}
else
key1=key1->clone_tree();
else if (!(key1=key1->clone_tree()))
return 0; // OOM
}
// Add tree at key2 to tree at key1
@@ -1530,7 +1540,10 @@ key_or(SEL_ARG *key1,SEL_ARG *key2)
SEL_ARG *next=key2->next; // Keys are not overlapping
if (key2_shared)
{
key1=key1->insert(new SEL_ARG(*key2)); // Must make copy
SEL_ARG *tmp= new SEL_ARG(*key2); // Must make copy
if (!tmp)
return 0; // OOM
key1=key1->insert(tmp);
key2->increment_use_count(key1->use_count+1);
}
else
@@ -1576,6 +1589,8 @@ key_or(SEL_ARG *key1,SEL_ARG *key2)
if (cmp >= 0 && tmp->cmp_min_to_min(key2) < 0)
{ // tmp.min <= x < key2.min
SEL_ARG *new_arg=tmp->clone_first(key2);
if (!new_arg)
return 0; // OOM
if ((new_arg->next_key_part= key1->next_key_part))
new_arg->increment_use_count(key1->use_count+1);
tmp->copy_min_to_min(key2);
@@ -1589,6 +1604,8 @@ key_or(SEL_ARG *key1,SEL_ARG *key2)
if (tmp->cmp_min_to_min(&key) > 0)
{ // key.min <= x < tmp.min
SEL_ARG *new_arg=key.clone_first(tmp);
if (!new_arg)
return 0; // OOM
if ((new_arg->next_key_part=key.next_key_part))
new_arg->increment_use_count(key1->use_count+1);
key1=key1->insert(new_arg);
@@ -1603,19 +1620,27 @@ key_or(SEL_ARG *key1,SEL_ARG *key2)
key.copy_max_to_min(tmp);
if (!(tmp=tmp->next))
{
key1=key1->insert(new SEL_ARG(key));
SEL_ARG *tmp2= new SEL_ARG(key);
if (!tmp2)
return 0; // OOM
key1=key1->insert(tmp2);
key2=key2->next;
goto end;
}
if (tmp->cmp_min_to_max(&key) > 0)
{
key1=key1->insert(new SEL_ARG(key));
SEL_ARG *tmp2= new SEL_ARG(key);
if (!tmp2)
return 0; // OOM
key1=key1->insert(tmp2);
break;
}
}
else
{
SEL_ARG *new_arg=tmp->clone_last(&key); // tmp.min <= x <= key.max
if (!new_arg)
return 0; // OOM
tmp->copy_max_to_min(&key);
tmp->increment_use_count(key1->use_count+1);
new_arg->next_key_part=key_or(tmp->next_key_part,key.next_key_part);
@@ -1632,8 +1657,11 @@ end:
SEL_ARG *next=key2->next;
if (key2_shared)
{
SEL_ARG *tmp=new SEL_ARG(*key2); // Must make copy
if (!tmp)
return 0;
key2->increment_use_count(key1->use_count+1);
key1=key1->insert(new SEL_ARG(*key2)); // Must make copy
key1=key1->insert(tmp);
}
else
key1=key1->insert(key2); // Will destroy key2_root
@@ -2222,7 +2250,8 @@ get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree)
{
QUICK_SELECT *quick;
DBUG_ENTER("get_quick_select");
if ((quick=new QUICK_SELECT(param->table,param->real_keynr[idx])))
if ((quick=new QUICK_SELECT(param->thd, param->table,
param->real_keynr[idx])))
{
if (quick->error ||
get_quick_keys(param,quick,param->key[idx],key_tree,param->min_key,0,
@@ -2334,10 +2363,10 @@ get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key,
/* Get range for retrieving rows in QUICK_SELECT::get_next */
if (!(range= new QUICK_RANGE(param->min_key,
(uint) (tmp_min_key - param->min_key),
param->max_key,
(uint) (tmp_max_key - param->max_key),
flag)))
(uint) (tmp_min_key - param->min_key),
param->max_key,
(uint) (tmp_max_key - param->max_key),
flag)))
return 1; // out of memory
set_if_bigger(quick->max_used_key_length,range->min_length);
@@ -2394,10 +2423,10 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length)
** Create a QUICK RANGE based on a key
****************************************************************************/
QUICK_SELECT *get_quick_select_for_ref(TABLE *table, TABLE_REF *ref)
QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref)
{
table->file->index_end(); // Remove old cursor
QUICK_SELECT *quick=new QUICK_SELECT(table, ref->key, 1);
QUICK_SELECT *quick=new QUICK_SELECT(thd, table, ref->key, 1);
KEY *key_info = &table->key_info[ref->key];
KEY_PART *key_part;
uint part;
@@ -2406,7 +2435,7 @@ QUICK_SELECT *get_quick_select_for_ref(TABLE *table, TABLE_REF *ref)
return 0; /* no ranges found */
if (cp_buffer_from_ref(ref))
{
if (current_thd->is_fatal_error)
if (thd->is_fatal_error)
return 0; // out of memory
return quick; // empty range
}

View File

@@ -83,7 +83,7 @@ public:
ha_rows records;
double read_time;
QUICK_SELECT(TABLE *table,uint index_arg,bool no_alloc=0);
QUICK_SELECT(THD *thd, TABLE *table,uint index_arg,bool no_alloc=0);
virtual ~QUICK_SELECT();
void reset(void) { next=0; it.rewind(); }
int init() { return error=file->index_init(index); }
@@ -127,13 +127,14 @@ class SQL_SELECT :public Sql_alloc {
SQL_SELECT();
~SQL_SELECT();
bool check_quick(bool force_quick_range=0, ha_rows limit = HA_POS_ERROR)
{ return test_quick_select(~0L,0,limit, force_quick_range) < 0; }
bool check_quick(THD *thd, bool force_quick_range, ha_rows limit)
{ return test_quick_select(thd, ~0L,0,limit, force_quick_range) < 0; }
inline bool skipp_record() { return cond ? cond->val_int() == 0 : 0; }
int test_quick_select(key_map keys,table_map prev_tables,ha_rows limit,
bool force_quick_range=0);
int test_quick_select(THD *thd, key_map keys, table_map prev_tables,
ha_rows limit, bool force_quick_range=0);
};
QUICK_SELECT *get_quick_select_for_ref(TABLE *table, struct st_table_ref *ref);
QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
struct st_table_ref *ref);
#endif

View File

@@ -235,6 +235,18 @@ sys_var_long_ptr sys_rpl_recovery_rank("rpl_recovery_rank",
sys_var_long_ptr sys_query_cache_size("query_cache_size",
&query_cache_size,
fix_query_cache_size);
sys_var_thd_ulong sys_range_alloc_block_size("range_alloc_block_size",
&SV::range_alloc_block_size);
sys_var_thd_ulong sys_query_alloc_block_size("query_alloc_block_size",
&SV::query_alloc_block_size);
sys_var_thd_ulong sys_query_prealloc_size("query_prealloc_size",
&SV::query_prealloc_size);
sys_var_thd_ulong sys_trans_alloc_block_size("transaction_alloc_block_size",
&SV::trans_alloc_block_size);
sys_var_thd_ulong sys_trans_prealloc_size("transaction_prealloc_size",
&SV::trans_prealloc_size);
#ifdef HAVE_QUERY_CACHE
sys_var_long_ptr sys_query_cache_limit("query_cache_limit",
&query_cache.query_cache_limit);
@@ -441,7 +453,9 @@ sys_var *sys_variables[]=
&sys_old_passwords,
&sys_preload_buff_size,
&sys_pseudo_thread_id,
&sys_query_alloc_block_size,
&sys_query_cache_size,
&sys_query_prealloc_size,
#ifdef HAVE_QUERY_CACHE
&sys_query_cache_limit,
&sys_query_cache_min_res_unit,
@@ -450,6 +464,7 @@ sys_var *sys_variables[]=
&sys_quote_show_create,
&sys_rand_seed1,
&sys_rand_seed2,
&sys_range_alloc_block_size,
&sys_read_buff_size,
&sys_read_rnd_buff_size,
#ifdef HAVE_REPLICATION
@@ -478,6 +493,8 @@ sys_var *sys_variables[]=
&sys_thread_cache_size,
&sys_timestamp,
&sys_tmp_table_size,
&sys_trans_alloc_block_size,
&sys_trans_prealloc_size,
&sys_tx_isolation,
#ifdef HAVE_INNOBASE_DB
&sys_innodb_max_dirty_pages_pct,
@@ -629,6 +646,8 @@ struct show_var_st init_vars[]= {
{"protocol_version", (char*) &protocol_version, SHOW_INT},
{sys_preload_buff_size.name, (char*) &sys_preload_buff_size, SHOW_SYS},
{sys_pseudo_thread_id.name, (char*) &sys_pseudo_thread_id, SHOW_SYS},
{sys_query_alloc_block_size.name, (char*) &sys_query_alloc_block_size,
SHOW_SYS},
#ifdef HAVE_QUERY_CACHE
{sys_query_cache_limit.name,(char*) &sys_query_cache_limit, SHOW_SYS},
{sys_query_cache_min_res_unit.name, (char*) &sys_query_cache_min_res_unit,
@@ -637,6 +656,9 @@ struct show_var_st init_vars[]= {
{sys_query_cache_type.name, (char*) &sys_query_cache_type, SHOW_SYS},
{"secure_auth", (char*) &sys_secure_auth, SHOW_SYS},
#endif /* HAVE_QUERY_CACHE */
{sys_query_prealloc_size.name, (char*) &sys_query_prealloc_size, SHOW_SYS},
{sys_range_alloc_block_size.name, (char*) &sys_range_alloc_block_size,
SHOW_SYS},
{sys_read_buff_size.name, (char*) &sys_read_buff_size, SHOW_SYS},
{sys_readonly.name, (char*) &sys_readonly, SHOW_SYS},
{sys_read_rnd_buff_size.name,(char*) &sys_read_rnd_buff_size, SHOW_SYS},
@@ -675,6 +697,9 @@ struct show_var_st init_vars[]= {
#endif
{sys_tmp_table_size.name, (char*) &sys_tmp_table_size, SHOW_SYS},
{"tmpdir", (char*) &opt_mysql_tmpdir, SHOW_CHAR_PTR},
{sys_trans_alloc_block_size.name, (char*) &sys_trans_alloc_block_size,
SHOW_SYS},
{sys_trans_prealloc_size.name, (char*) &sys_trans_prealloc_size, SHOW_SYS},
{"version", server_version, SHOW_CHAR},
{sys_net_wait_timeout.name, (char*) &sys_net_wait_timeout, SHOW_SYS},
{NullS, NullS, SHOW_LONG}
@@ -1082,7 +1107,8 @@ byte *sys_var_thd_bool::value_ptr(THD *thd, enum_var_type type,
bool sys_var::check_enum(THD *thd, set_var *var, TYPELIB *enum_names)
{
char buff[80], *value;
char buff[80];
const char *value;
String str(buff, sizeof(buff), system_charset_info), *res;
if (var->value->result_type() == STRING_RESULT)
@@ -1092,7 +1118,7 @@ bool sys_var::check_enum(THD *thd, set_var *var, TYPELIB *enum_names)
(ulong) find_type(res->c_ptr(), enum_names, 3)-1))
< 0)
{
value=res->c_ptr();
value= res ? res->c_ptr() : "NULL";
goto err;
}
}

View File

@@ -693,7 +693,7 @@ public:
uint name_length_arg, gptr data_arg)
:name_length(name_length_arg), data(data_arg)
{
name= my_memdup(name_arg, name_length, MYF(MY_WME));
name= my_memdup((byte*) name_arg, name_length, MYF(MY_WME));
links->push_back(this);
}
inline bool cmp(const char *name_cmp, uint length)

View File

@@ -186,7 +186,7 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables)
thd->net.last_error);
goto end;
}
init_sql_alloc(&mem,1024,0);
init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0);
VOID(my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST),20,50));
while (!(read_record_info.read_record(&read_record_info)))
@@ -2450,7 +2450,7 @@ my_bool grant_init(THD *org_thd)
(void) hash_init(&column_priv_hash,&my_charset_latin1,
0,0,0, (hash_get_key) get_grant_table,
(hash_free_key) free_grant_table,0);
init_sql_alloc(&memex,1024,0);
init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0);
/* Don't do anything if running with --skip-grant */
if (!initialized)

View File

@@ -90,8 +90,9 @@ THD::THD():user_time(0), is_fatal_error(0),
{
host=user=priv_user=db=query=ip=0;
host_or_ip= "connecting host";
locked=killed=count_cuted_fields=some_tables_deleted=no_errors=password=
locked=killed=some_tables_deleted=no_errors=password=
query_start_used=prepare_command=0;
count_cuted_fields= CHECK_FIELD_IGNORE;
db_length=query_length=col_access=0;
query_error= tmp_table_used= 0;
next_insert_id=last_insert_id=0;
@@ -147,7 +148,7 @@ THD::THD():user_time(0), is_fatal_error(0),
bzero((char*) &transaction.mem_root,sizeof(transaction.mem_root));
bzero((char*) &con_root,sizeof(con_root));
bzero((char*) &warn_root,sizeof(warn_root));
init_alloc_root(&warn_root, 1024, 0);
init_alloc_root(&warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE);
user_connect=(USER_CONN *)0;
hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0,
(hash_get_key) get_var_key,
@@ -230,9 +231,11 @@ void THD::init(void)
void THD::init_for_queries()
{
init_sql_alloc(&mem_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC);
init_sql_alloc(&mem_root, variables.query_alloc_block_size,
variables.query_prealloc_size);
init_sql_alloc(&transaction.mem_root,
TRANS_MEM_ROOT_BLOCK_SIZE, TRANS_MEM_ROOT_PREALLOC);
variables.trans_alloc_block_size,
variables.trans_prealloc_size);
}

View File

@@ -34,6 +34,9 @@ enum enum_log_type { LOG_CLOSED, LOG_TO_BE_OPENED, LOG_NORMAL, LOG_NEW, LOG_BIN}
enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON,
DELAY_KEY_WRITE_ALL };
enum enum_check_fields { CHECK_FIELD_IGNORE, CHECK_FIELD_WARN,
CHECK_FIELD_ERROR_FOR_NULL };
extern char internal_table_name[2];
/* log info errors */
@@ -389,10 +392,15 @@ struct system_variables
ulong table_type;
ulong tmp_table_size;
ulong tx_isolation;
/* Determines if which non-standard SQL behaviour should be enabled */
/* Determines which non-standard SQL behaviour should be enabled */
ulong sql_mode;
ulong default_week_format;
ulong max_seeks_for_key;
ulong range_alloc_block_size;
ulong query_alloc_block_size;
ulong query_prealloc_size;
ulong trans_alloc_block_size;
ulong trans_prealloc_size;
ulong group_concat_max_len;
/*
In slave thread we need to know in behalf of which
@@ -569,6 +577,7 @@ public:
uint select_number; //number of select (used for EXPLAIN)
/* variables.transaction_isolation is reset to this after each commit */
enum_tx_isolation session_tx_isolation;
enum_check_fields count_cuted_fields;
/* for user variables replication*/
DYNAMIC_ARRAY user_var_events;
@@ -576,7 +585,7 @@ public:
char scramble[SCRAMBLE_LENGTH+1];
bool slave_thread;
bool set_query_id,locked,count_cuted_fields,some_tables_deleted;
bool set_query_id,locked,some_tables_deleted;
bool last_cuted_field;
bool no_errors, allow_sum_func, password, is_fatal_error;
bool query_start_used,last_insert_id_used,insert_id_used,rand_used;

View File

@@ -89,7 +89,7 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ORDER *order,
select=make_select(table,0,0,conds,&error);
if (error)
DBUG_RETURN(-1);
if ((select && select->check_quick(safe_update, limit)) || !limit)
if ((select && select->check_quick(thd, safe_update, limit)) || !limit)
{
delete select;
free_underlaid_joins(thd, &thd->lex.select_lex);

View File

@@ -599,7 +599,7 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, TABLE_LIST *tables,
{
cond->fix_fields(thd, tables, &cond); // can never fail
SQL_SELECT *res= make_select(table,0,0,cond,error);
return (*error || (res && res->check_quick(0, HA_POS_ERROR))) ? 0 : res;
return (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR))) ? 0 : res;
}
/*
@@ -627,6 +627,8 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen,
Item *cond= new Item_func_like(new Item_field(pfname),
new Item_string(mask,mlen,pfname->charset()),
(char*) "\\");
if (thd->is_fatal_error)
return 0; // OOM
return prepare_simple_select(thd,cond,tables,table,error);
}

View File

@@ -240,9 +240,14 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
info.handle_duplicates=duplic;
info.update_fields=&update_fields;
info.update_values=&update_values;
// Don't count warnings for simple inserts
if (values_list.elements > 1 || (thd->options & OPTION_WARNINGS))
thd->count_cuted_fields = 1;
/*
Count warnings for all inserts.
For single line insert, generate an error if try to set a NOT NULL field
to NULL
*/
thd->count_cuted_fields= ((values_list.elements == 1) ?
CHECK_FIELD_ERROR_FOR_NULL :
CHECK_FIELD_WARN);
thd->cuted_fields = 0L;
table->next_number_field=table->found_next_number_field;
@@ -394,7 +399,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
}
thd->proc_info="end";
table->next_number_field=0;
thd->count_cuted_fields=0;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
thd->next_insert_id=0; // Reset this if wrongly used
if (duplic != DUP_ERROR)
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
@@ -1391,7 +1396,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
restore_record(table,default_values); // Get empty record
table->next_number_field=table->found_next_number_field;
thd->count_cuted_fields=1; // calc cuted fields
thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields
thd->cuted_fields=0;
if (info.handle_duplicates != DUP_REPLACE)
table->file->extra(HA_EXTRA_WRITE_CACHE);
@@ -1409,7 +1414,7 @@ select_insert::~select_insert()
table->next_number_field=0;
table->file->extra(HA_EXTRA_RESET);
}
thd->count_cuted_fields=0;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
}
@@ -1559,7 +1564,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table->next_number_field=table->found_next_number_field;
restore_record(table,default_values); // Get empty record
thd->count_cuted_fields=1; // count warnings
thd->count_cuted_fields= CHECK_FIELD_WARN; // count warnings
thd->cuted_fields=0;
if (info.handle_duplicates == DUP_IGNORE ||
info.handle_duplicates == DUP_REPLACE)
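A sketch of the behaviour the comment above describes, using a hypothetical table (an illustration, not output from the test suite):

CREATE TABLE t1 (a INT NOT NULL);
INSERT INTO t1 VALUES (NULL);       # single row: CHECK_FIELD_ERROR_FOR_NULL, statement fails
INSERT INTO t1 VALUES (NULL),(1);   # multiple rows: CHECK_FIELD_WARN, warning and implicit default stored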

View File

@@ -252,7 +252,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
restore_record(table,default_values);
thd->count_cuted_fields=1; /* calc cuted fields */
thd->count_cuted_fields= CHECK_FIELD_WARN; /* calc cuted fields */
thd->cuted_fields=0L;
if (ex->line_term->length() && field_term->length())
{
@@ -293,7 +293,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
if (file >= 0) my_close(file,MYF(0));
free_blobs(table); /* if pack_blob was used */
table->copy_blobs=0;
thd->count_cuted_fields=0; /* Don`t calc cuted fields */
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
/*
We must invalidate the table in query cache before binlog writing and

View File

@@ -888,12 +888,15 @@ bool mysql_stmt_prepare(THD *thd, char *packet, uint packet_length)
{
MEM_ROOT thd_root= thd->mem_root;
PREP_STMT stmt;
SELECT_LEX *sl;
DBUG_ENTER("mysql_stmt_prepare");
bzero((char*) &stmt, sizeof(stmt));
stmt.stmt_id= ++thd->current_stmt_id;
init_sql_alloc(&stmt.mem_root, 8192, 8192);
init_sql_alloc(&stmt.mem_root,
thd->variables.query_alloc_block_size,
thd->variables.query_prealloc_size);
stmt.thd= thd;
stmt.thd->mem_root= stmt.mem_root;
@@ -908,7 +911,7 @@ bool mysql_stmt_prepare(THD *thd, char *packet, uint packet_length)
my_pthread_setprio(pthread_self(),WAIT_PRIOR);
// save WHERE clause pointers to avoid damaging they by optimisation
for (SELECT_LEX *sl= thd->lex.all_selects_list;
for (sl= thd->lex.all_selects_list;
sl;
sl= sl->next_select_in_list())
{
@@ -943,8 +946,9 @@ err:
void mysql_stmt_execute(THD *thd, char *packet)
{
ulong stmt_id= uint4korr(packet);
PREP_STMT *stmt;
ulong stmt_id= uint4korr(packet);
PREP_STMT *stmt;
SELECT_LEX *sl;
DBUG_ENTER("mysql_stmt_execute");
if (!(stmt=find_prepared_statement(thd, stmt_id, "execute")))
@@ -963,11 +967,13 @@ void mysql_stmt_execute(THD *thd, char *packet)
LEX thd_lex= thd->lex;
thd->lex= stmt->lex;
for (SELECT_LEX *sl= stmt->lex.all_selects_list;
for (sl= stmt->lex.all_selects_list;
sl;
sl= sl->next_select_in_list())
{
// copy WHERE clause pointers to avoid damaging they by optimisation
/*
Copy WHERE clause pointers to avoid damaging they by optimisation
*/
if (sl->prep_where)
sl->where= sl->prep_where->copy_andor_structure(thd);
DBUG_ASSERT(sl->join == 0);

View File

@@ -1574,7 +1574,8 @@ err:
Approximate how many records will be used in each table
*****************************************************************************/
static ha_rows get_quick_record_count(SQL_SELECT *select,TABLE *table,
static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
TABLE *table,
key_map keys,ha_rows limit)
{
int error;
@@ -1583,7 +1584,7 @@ static ha_rows get_quick_record_count(SQL_SELECT *select,TABLE *table,
{
select->head=table;
table->reginfo.impossible_range=0;
if ((error=select->test_quick_select(keys,(table_map) 0,limit))
if ((error=select->test_quick_select(thd, keys,(table_map) 0,limit))
== 1)
DBUG_RETURN(select->quick->records);
if (error == -1)
@@ -1866,8 +1867,8 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
found_const_table_map,
s->on_expr ? s->on_expr : conds,
&error);
records= get_quick_record_count(select,s->table, s->const_keys,
join->row_limit);
records= get_quick_record_count(join->thd, select, s->table,
s->const_keys, join->row_limit);
s->quick=select->quick;
s->needed_reg=select->needed_reg;
select->quick=0;
@@ -3218,9 +3219,9 @@ store_val_in_field(Field *field,Item *item)
bool error;
THD *thd=current_thd;
ha_rows cuted_fields=thd->cuted_fields;
thd->count_cuted_fields=1;
thd->count_cuted_fields= CHECK_FIELD_WARN;
error= item->save_in_field(field, 1);
thd->count_cuted_fields=0;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
return error || cuted_fields != thd->cuted_fields;
}
@@ -3377,7 +3378,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
/* Join with outer join condition */
COND *orig_cond=sel->cond;
sel->cond=and_conds(sel->cond,tab->on_expr);
if (sel->test_quick_select(tab->keys,
if (sel->test_quick_select(join->thd, tab->keys,
used_tables & ~ current_map,
(join->select_options &
OPTION_FOUND_ROWS ?
@@ -3390,7 +3391,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
*/
sel->cond=orig_cond;
if (!tab->on_expr ||
sel->test_quick_select(tab->keys,
sel->test_quick_select(join->thd, tab->keys,
used_tables & ~ current_map,
(join->select_options &
OPTION_FOUND_ROWS ?
@@ -5828,7 +5829,8 @@ test_if_quick_select(JOIN_TAB *tab)
{
delete tab->select->quick;
tab->select->quick=0;
return tab->select->test_quick_select(tab->keys,(table_map) 0,HA_POS_ERROR);
return tab->select->test_quick_select(tab->join->thd, tab->keys,
(table_map) 0, HA_POS_ERROR);
}
@@ -6921,7 +6923,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
For impossible ranges (like when doing a lookup on NULL on a NOT NULL
field, quick will contain an empty record set.
*/
if (!(select->quick=get_ft_or_quick_select_for_ref(table, tab)))
if (!(select->quick=get_ft_or_quick_select_for_ref(tab->join->thd,
table, tab)))
goto err;
}
}

View File

@@ -2305,7 +2305,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (use_timestamp)
new_table->time_stamp=0;
new_table->next_number_field=new_table->found_next_number_field;
thd->count_cuted_fields=1; // calc cuted fields
thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields
thd->cuted_fields=0L;
thd->proc_info="copy to tmp table";
next_insert_id=thd->next_insert_id; // Remember for loggin
@@ -2315,7 +2315,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
handle_duplicates,
order_num, order, &copied, &deleted);
thd->last_insert_id=next_insert_id; // Needed for correct log
thd->count_cuted_fields=0; // Don`t calc cuted fields
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
new_table->time_stamp=save_time_stamp;
if (table->tmp_table)
@@ -2724,9 +2724,9 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
while (!t->file->rnd_next(t->record[0]))
{
ha_checksum row_crc= 0;
if (t->record[0] != t->field[0]->ptr)
if (t->record[0] != (byte*) t->field[0]->ptr)
row_crc= my_checksum(row_crc, t->record[0],
t->field[0]->ptr - t->record[0]);
((byte*) t->field[0]->ptr) - t->record[0]);
for (uint i= 0; i < t->fields; i++ )
{
@@ -2735,10 +2735,11 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
{
String tmp;
f->val_str(&tmp,&tmp);
row_crc= my_checksum(row_crc, tmp.ptr(), tmp.length());
row_crc= my_checksum(row_crc, (byte*) tmp.ptr(), tmp.length());
}
else
row_crc= my_checksum(row_crc, f->ptr, f->pack_length());
row_crc= my_checksum(row_crc, (byte*) f->ptr,
f->pack_length());
}
crc+= row_crc;

View File

@@ -22,6 +22,7 @@
#include "sql_select.h"
#include <hash.h>
#include <thr_alarm.h>
#include <malloc.h>
/* Intern key cache variables */
extern "C" pthread_mutex_t THR_LOCK_keycache;
@@ -365,6 +366,32 @@ Next alarm time: %lu\n",
thd->proc_info="malloc";
my_checkmalloc();
TERMINATE(stdout); // Write malloc information
#ifdef HAVE_MALLINFO
struct mallinfo info= mallinfo();
printf("\nMemory status:\n\
Non-mmapped space allocated from system: %d\n\
Number of free chunks: %d\n\
Number of fastbin blocks: %d\n\
Number of mmapped regions: %d\n\
Space in mmapped regions: %d\n\
Maximum total allocated space: %d\n\
Space available in freed fastbin blocks: %d\n\
Total allocated space: %d\n\
Total free space: %d\n\
Top-most, releasable space: %d\n",
(int) info.arena,
(int) info.ordblks,
(int) info.smblks,
(int) info.hblks,
(int) info.hblkhd,
(int) info.usmblks,
(int) info.fsmblks,
(int) info.uordblks,
(int) info.fordblks,
(int) info.keepcost);
#endif
puts("");
if (thd)
thd->proc_info=0;
}

View File

@@ -128,7 +128,7 @@ void udf_init()
my_rwlock_init(&THR_LOCK_udf,NULL);
init_sql_alloc(&mem, 1024,0);
init_sql_alloc(&mem, UDF_ALLOC_BLOCK_SIZE, 0);
THD *new_thd = new THD;
if (!new_thd ||
hash_init(&udf_hash,system_charset_info,32,0,0,get_hash_key, NULL, 0))

View File

@@ -152,7 +152,7 @@ int mysql_update(THD *thd,
table->used_keys=0;
select=make_select(table,0,0,conds,&error);
if (error ||
(select && select->check_quick(safe_update, limit)) || !limit)
(select && select->check_quick(thd, safe_update, limit)) || !limit)
{
delete select;
free_underlaid_joins(thd, &thd->lex.select_lex);
@@ -295,7 +295,7 @@ int mysql_update(THD *thd,
init_read_record(&info,thd,table,select,0,1);
updated= found= 0;
thd->count_cuted_fields=1; /* calc cuted fields */
thd->count_cuted_fields= CHECK_FIELD_WARN; /* calc cuted fields */
thd->cuted_fields=0L;
thd->proc_info="Updating";
query_id=thd->query_id;
@@ -386,7 +386,7 @@ int mysql_update(THD *thd,
thd->insert_id_used ? thd->insert_id() : 0L,buff);
DBUG_PRINT("info",("%d records updated",updated));
}
thd->count_cuted_fields=0; /* calc cuted fields */
thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */
free_io_cache(table);
DBUG_RETURN(0);
@@ -492,7 +492,7 @@ int multi_update::prepare(List<Item> &not_used_values, SELECT_LEX_UNIT *unit)
uint i, max_fields;
DBUG_ENTER("multi_update::prepare");
thd->count_cuted_fields=1;
thd->count_cuted_fields= CHECK_FIELD_WARN;
thd->cuted_fields=0L;
thd->proc_info="updating main table";
@@ -733,7 +733,7 @@ multi_update::~multi_update()
}
if (copy_field)
delete [] copy_field;
thd->count_cuted_fields=0; // Restore this setting
thd->count_cuted_fields= CHECK_FIELD_IGNORE; // Restore this setting
if (!trans_safe)
thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
}

View File

@@ -90,7 +90,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag,
outparam->db_stat = db_stat;
error=1;
init_sql_alloc(&outparam->mem_root,1024,0);
init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
MEM_ROOT *old_root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC);
my_pthread_setspecific_ptr(THR_MALLOC,&outparam->mem_root);