
Merge mysql.com:/home/jonas/src/mysql-4.1
into mysql.com:/home/jonas/src/mysql-4.1-ndb

ndb/src/mgmsrv/Services.cpp:
  Auto merged
configure.in:
  Auto merged
acinclude.m4:
  Auto merged
unknown
2004-08-24 21:07:08 +02:00
31 changed files with 254 additions and 162 deletions


@@ -1,10 +1,33 @@
 # Local macros for automake & autoconf
+AC_DEFUN(MYSQL_CHECK_READLINE_DECLARES_HIST_ENTRY,[
+AC_CACHE_CHECK([HIST_ENTRY is declared in readline/readline.h], mysql_cv_hist_entry_declared,
+AC_TRY_COMPILE(
+[
+#include "stdio.h"
+#undef __P // readline-4.2 declares own __P
+#include "readline/readline.h"
+],
+[
+HIST_ENTRY entry;
+],
+[
+mysql_cv_hist_entry_declared=yes
+AC_DEFINE_UNQUOTED(HAVE_HIST_ENTRY, [1],
+[HIST_ENTRY is defined in the outer libeditreadline])
+],
+[mysql_cv_libedit_interface=no]
+)
+)
+])
 AC_DEFUN(MYSQL_CHECK_LIBEDIT_INTERFACE,[
 AC_CACHE_CHECK([libedit variant of rl_completion_entry_function], mysql_cv_libedit_interface,
 AC_TRY_COMPILE(
 [
 #include "stdio.h"
+#undef __P // readline-4.2 declares own __P
 #include "readline/readline.h"
 ],
 [
@@ -26,6 +49,7 @@ AC_DEFUN(MYSQL_CHECK_NEW_RL_INTERFACE,[
 AC_TRY_COMPILE(
 [
 #include "stdio.h"
+#undef __P // readline-4.2 declares own __P
 #include "readline/readline.h"
 ],
 [


@@ -84,6 +84,7 @@ extern "C" {
 #if defined( __WIN__) || defined(OS2)
 #include <conio.h>
 #elif !defined(__NETWARE__)
+#undef __P // readline-4.2 declares own __P
 #include <readline/readline.h>
 #define HAVE_READLINE
 #endif
@@ -294,7 +295,7 @@ static const char *server_default_groups[]=
 HIST_ENTRY is defined for libedit, but not for the real readline
 Need to redefine it for real readline to find it
 */
-#if !defined(USE_LIBEDIT_INTERFACE)
+#if !defined(HAVE_HIST_ENTRY)
 typedef struct _hist_entry {
 const char *line;
 const char *data;
@@ -753,8 +754,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
 opt_nopager= 1;
 case OPT_MYSQL_PROTOCOL:
 {
-if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) ==
-    ~(ulong) 0)
+if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
 {
 fprintf(stderr, "Unknown option to protocol: %s\n", argument);
 exit(1);
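
The same one-line fix recurs in several of the hunks below, including the mysql_read_default_options() one: the old code compared the typelib lookup result against ~(ulong) 0, which, after the assignment to an unsigned option variable, only matches the "ambiguous prefix" sentinel, so a completely unknown --protocol value (for which the lookup returns 0) was silently accepted; that is Bug #4998. What follows is a minimal, self-contained C++ sketch of the pattern, assuming the usual typelib convention (0 = not found, negative = ambiguous, otherwise index+1); lookup_type() and the protocols list are illustrative stand-ins, not the real find_type()/sql_protocol_typelib.

// protocol_check.cc -- build with: g++ -std=c++11 protocol_check.cc
#include <cstdio>
#include <cstring>
#include <strings.h>   // strncasecmp (POSIX)
#include <string>
#include <vector>

// Illustrative stand-in for find_type(): index+1 for a unique prefix match,
// 0 when nothing matches, -1 when the prefix is ambiguous.
static int lookup_type(const char *arg, const std::vector<std::string> &names)
{
  int found= 0, matches= 0;
  for (size_t i= 0; i < names.size(); i++)
    if (strncasecmp(names[i].c_str(), arg, strlen(arg)) == 0)
    {
      matches++;
      found= (int) (i + 1);
    }
  return matches == 0 ? 0 : (matches > 1 ? -1 : found);
}

int main(int argc, char **argv)
{
  const std::vector<std::string> protocols= {"TCP", "SOCKET", "PIPE", "MEMORY"};
  const char *arg= (argc > 1) ? argv[1] : "NullS";   // bogus value from the new test case
  unsigned long opt_protocol;

  // Old check: an unknown name makes lookup_type() return 0, which is not
  // ~0UL, so the bad value slips through.
  bool old_rejects= ((opt_protocol= (unsigned long) lookup_type(arg, protocols))
                     == ~(unsigned long) 0);

  // New check: testing the stored (unsigned) result for <= 0 amounts to
  // "== 0", i.e. "no such protocol", exactly the case the old test missed.
  bool new_rejects= ((opt_protocol= (unsigned long) lookup_type(arg, protocols)) <= 0);

  printf("'%s': old check rejects=%d, new check rejects=%d\n",
         arg, (int) old_rejects, (int) new_rejects);
  if (new_rejects)
    fprintf(stderr, "Unknown option to protocol: %s\n", arg);
  return new_rejects ? 1 : 0;
}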


@@ -249,7 +249,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
 break;
 case OPT_MYSQL_PROTOCOL:
 {
-if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
 {
 fprintf(stderr, "Unknown option to protocol: %s\n", argument);
 exit(1);


@@ -633,8 +633,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
 break;
 case OPT_MYSQL_PROTOCOL:
 {
-if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) ==
-    ~(ulong) 0)
+if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
 {
 fprintf(stderr, "Unknown option to protocol: %s\n", argument);
 exit(1);


@@ -271,7 +271,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
 case 'V': print_version(); exit(0);
 case OPT_MYSQL_PROTOCOL:
 {
-if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
 {
 fprintf(stderr, "Unknown option to protocol: %s\n", argument);
 exit(1);


@@ -582,8 +582,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
 }
 case (int) OPT_MYSQL_PROTOCOL:
 {
-if ((opt_protocol= find_type(argument, &sql_protocol_typelib, 0))
-    == ~(ulong) 0)
+if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
 {
 fprintf(stderr, "Unknown option to protocol: %s\n", argument);
 exit(1);


@@ -203,7 +203,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
 #endif
 case OPT_MYSQL_PROTOCOL:
 {
-if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
 {
 fprintf(stderr, "Unknown option to protocol: %s\n", argument);
 exit(1);


@@ -268,7 +268,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
 break;
 case OPT_MYSQL_PROTOCOL:
 {
-if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
 {
 fprintf(stderr, "Unknown option to protocol: %s\n", argument);
 exit(1);


@@ -1,3 +1,20 @@
+# Copyright (C) 2004 MySQL AB
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Library General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Library General Public License for more details.
+#
+# You should have received a copy of the GNU Library General Public
+# License along with this library; if not, write to the Free
+# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+# MA 02111-1307, USA
+
 ## Process this file with automake to create Makefile.in
 SUBDIRS= @readline_basedir@


@@ -2301,6 +2301,20 @@ AC_ARG_WITH(libedit,
 [ with_libedit=undefined ]
 )
+
+#
+# We support next variants of compilation:
+# --with-readline
+# | yes | no | undefined
+# --with-libedit | | |
+# ---------------+----------------+------+----------------------------------
+# yes | ERROR! | use libedit from mysql sources
+# ---------------+----------------+------+----------------------------------
+# no | use readline | use system readline or external libedit
+# | from mysql | according to results of m4 tests
+# ---------------+ sources (if it +----------------------------------
+# undefined | is presented) | | use libedit from mysql sources
+
 compile_readline="no"
 compile_libedit="no"
@@ -2328,6 +2342,7 @@ then
 readline_link="\$(top_builddir)/cmd-line-utils/libedit/liblibedit.a"
 readline_h_ln_cmd="\$(LN) -s \$(top_builddir)/cmd-line-utils/libedit/readline readline"
 compile_libedit=yes
+AC_DEFINE_UNQUOTED(HAVE_HIST_ENTRY)
 AC_DEFINE_UNQUOTED(USE_LIBEDIT_INTERFACE, 1)
 elif test "$with_readline" = "yes"
 then
@@ -2339,8 +2354,12 @@ then
 compile_readline=yes
 AC_DEFINE_UNQUOTED(USE_NEW_READLINE_INTERFACE, 1)
 else
+AC_LANG_SAVE
+AC_LANG_CPLUSPLUS
 MYSQL_CHECK_LIBEDIT_INTERFACE
 MYSQL_CHECK_NEW_RL_INTERFACE
+MYSQL_CHECK_READLINE_DECLARES_HIST_ENTRY
+AC_LANG_RESTORE
 if [test "$mysql_cv_new_rl_interface" = "yes"] || [test "$mysql_cv_libedit_interface" = "no"]
 then
 readline_link="-lreadline"


@@ -0,0 +1,9 @@
+<default>
+ok
+TCP
+ok
+SOCKET
+ok
+ERROR 2047: Wrong or unknown protocol
+ERROR 2047: Wrong or unknown protocol
+Unknown option to protocol: NullS


@@ -0,0 +1,10 @@
+# test for Bug #4998 "--protocol doesn't reject bad values"
+
+--exec echo "select ' ok' as '<default>'" | $MYSQL
+--exec echo "select ' ok' as 'TCP'" | $MYSQL --protocol=TCP
+--exec echo "select ' ok' as 'SOCKET'" | $MYSQL --protocol=SOCKET
+--exec echo "select ' ok' as 'PIPE'" | $MYSQL --protocol=PIPE 2>&1
+--exec echo "select ' ok' as 'MEMORY'" | $MYSQL --protocol=MEMORY 2>&1
+--exec echo "select ' ok' as 'NullS'" | $MYSQL --protocol=NullS 2>&1


@@ -158,7 +158,7 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a,
 (flag=mi_compare_text(keyseg->charset,a,a_length,b,b_length,
 (my_bool) ((nextflag & SEARCH_PREFIX) &&
 next_key_length <= 0),
-!(nextflag & SEARCH_PREFIX))))
+(my_bool)!(nextflag & SEARCH_PREFIX))))
 return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag);
 a+=a_length;
 b+=b_length;
@@ -171,7 +171,7 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a,
 (flag= mi_compare_text(keyseg->charset, a, a_length, b, b_length,
 (my_bool) ((nextflag & SEARCH_PREFIX) &&
 next_key_length <= 0),
-!(nextflag & SEARCH_PREFIX))))
+(my_bool)!(nextflag & SEARCH_PREFIX))))
 return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag);
 a=end;
 b+=length;


@@ -148,7 +148,7 @@ private:
 */
 class Grep : public SimulatedBlock //GrepParticipant
 {
-//BLOCK_DEFINES(Grep);
+BLOCK_DEFINES(Grep);
 public:
 Grep(const Configuration & conf);
@@ -519,19 +519,6 @@ public:
 typedef void (Grep::* ExecSignalLocal1) (Signal* signal);
 typedef void (Grep::PSCoord::* ExecSignalLocal2) (Signal* signal);
 typedef void (Grep::PSPart::* ExecSignalLocal4) (Signal* signal);
-void
-addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal1 f, bool force = false){
-addRecSignalImpl(gsn, (ExecFunction)f, force);
-}
-void
-addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal2 f, bool force = false){
-addRecSignalImpl(gsn, (ExecFunction)f, force);
-}
-void
-addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal4 f, bool force = false){
-addRecSignalImpl(gsn, (ExecFunction)f, force);
-}
 };


@@ -132,7 +132,7 @@ Grep::~Grep()
 {
 }
-//BLOCK_FUNCTIONS(Grep);
+BLOCK_FUNCTIONS(Grep);
 Grep::PSPart::PSPart(Grep * sb) :
 BlockComponent(sb),


@@ -654,13 +654,21 @@ CommandInterpreter::executeShow(char* parameters)
 api_nodes= 0,
 mgm_nodes= 0;
+for(i=0; i < state->no_of_nodes; i++) {
+if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB &&
+state->node_states[i].version != 0){
+master_id= state->node_states[i].dynamic_id;
+break;
+}
+}
 for(i=0; i < state->no_of_nodes; i++) {
 switch(state->node_states[i].node_type) {
 case NDB_MGM_NODE_TYPE_API:
 api_nodes++;
 break;
 case NDB_MGM_NODE_TYPE_NDB:
-if (state->node_states[i].dynamic_id > master_id)
+if (state->node_states[i].dynamic_id < master_id)
 master_id= state->node_states[i].dynamic_id;
 ndb_nodes++;
 break;
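
For reference, a condensed, self-contained C++ sketch of the selection rule the new SHOW code appears to implement: seed master_id from the first started data node and then keep the smallest dynamic_id (the old code kept the largest). The NodeState record and the "started" test via version != 0 mirror the diff above but are otherwise illustrative, not the real ndb_mgm structures.

#include <cstdio>
#include <vector>

enum NodeType { NDB, API, MGM };
struct NodeState { NodeType node_type; unsigned version; unsigned dynamic_id; };

// Pick the dynamic_id reported as master: seeded from the first started
// data node (version != 0), then the minimum over all data nodes -- the
// same two loops the diff adds to CommandInterpreter::executeShow().
static int pick_master(const std::vector<NodeState> &nodes)
{
  unsigned master_id= 0;
  bool seeded= false;
  for (const NodeState &n : nodes)
    if (n.node_type == NDB && n.version != 0)
    {
      master_id= n.dynamic_id;
      seeded= true;
      break;
    }
  if (!seeded)
    return -1;                              // no started data node at all
  for (const NodeState &n : nodes)
    if (n.node_type == NDB && n.dynamic_id < master_id)
      master_id= n.dynamic_id;
  return (int) master_id;
}

int main()
{
  std::vector<NodeState> nodes= { {API, 1, 7}, {NDB, 1, 5}, {NDB, 1, 3}, {MGM, 1, 1} };
  printf("master dynamic_id: %d\n", pick_master(nodes));   // prints 3
  return 0;
}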


@@ -2304,7 +2304,7 @@ bool
 MgmtSrvr::alloc_node_id(NodeId * nodeId,
 enum ndb_mgm_node_type type,
 struct sockaddr *client_addr,
-socklen_t *client_addr_len)
+SOCKET_SIZE_TYPE *client_addr_len)
 {
 Guard g(&f_node_id_mutex);
 #if 0
@@ -2885,4 +2885,6 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value,
 }
 template class Vector<SigMatch>;
+#if __SUNPRO_CC != 0x560
 template bool SignalQueue::waitFor<SigMatch>(Vector<SigMatch>&, SigMatch*&, NdbApiSignal*&, unsigned);
+#endif


@@ -467,7 +467,7 @@ public:
 */
 bool getNextNodeId(NodeId * _nodeId, enum ndb_mgm_node_type type) const ;
 bool alloc_node_id(NodeId * _nodeId, enum ndb_mgm_node_type type,
-struct sockaddr *client_addr, socklen_t *client_addr_len);
+struct sockaddr *client_addr, SOCKET_SIZE_TYPE *client_addr_len);
 /**
 *


@@ -402,7 +402,7 @@ MgmApiSession::get_nodeid(Parser_t::Context &,
 }
 struct sockaddr addr;
-socklen_t addrlen= sizeof(addr);
+SOCKET_SIZE_TYPE addrlen= sizeof(addr);
 int r = getpeername(m_socket, &addr, &addrlen);
 if (r != 0 ) {
 m_output->println(cmd);


@@ -37,7 +37,7 @@
 NdbEventOperation::NdbEventOperation(Ndb *theNdb,
 const char* eventName,
-const int bufferLength)
+int bufferLength)
 : m_impl(* new NdbEventOperationImpl(*this,theNdb,
 eventName,
 bufferLength))


@@ -778,7 +778,8 @@ main(void){
 #endif
 template class Vector<NdbScanFilterImpl::State>;
+#if __SUNPRO_CC != 0x560
 template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint32);
 template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint64);
+#endif


@@ -161,7 +161,9 @@ private:
 /**
 * Block number handling
 */
+public:
 static const unsigned MAX_NO_THREADS = 4711;
+private:
 struct ThreadData {
 static const Uint32 ACTIVE = (1 << 16) | 1;


@@ -386,6 +386,7 @@ sync_down(NDBT_Context* ctx){
 if(threads){
 ctx->decProperty("PauseThreads");
 }
+return 0;
 }
 int
@@ -397,6 +398,7 @@ sync_up_and_wait(NDBT_Context* ctx){
 if(threads){
 ndbout_c("wait completed");
 }
+return 0;
 }
 int


@@ -1058,9 +1058,8 @@ void mysql_read_default_options(struct st_mysql_options *options,
 options->max_allowed_packet= atoi(opt_arg);
 break;
 case 28: /* protocol */
-if ((options->protocol = find_type(opt_arg,
-&sql_protocol_typelib,0))
-== ~(ulong) 0)
+if ((options->protocol= find_type(opt_arg,
+&sql_protocol_typelib,0)) <= 0)
 {
 fprintf(stderr, "Unknown option to protocol: %s\n", opt_arg);
 exit(1);


@@ -1423,7 +1423,8 @@ int ha_ndbcluster::write_row(byte *record)
 {
 Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
 DBUG_PRINT("info",
-("Trying to set next auto increment value to %u", next_val));
+("Trying to set next auto increment value to %lu",
+(ulong) next_val));
 if (m_ndb->setAutoIncrementValue((NDBTAB *) m_table, next_val, true))
 DBUG_PRINT("info",
 ("Setting next auto increment value to %u", next_val));


@@ -919,7 +919,7 @@ double Item_param::val()
 This works for example when user says SELECT ?+0.0 and supplies
 time value for the placeholder.
 */
-return (double) TIME_to_ulonglong(&value.time);
+return ulonglong2double(TIME_to_ulonglong(&value.time));
 case NULL_VALUE:
 return 0.0;
 default:


@@ -787,7 +787,7 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commi
 LINT_INIT(old_message);
 (void) pthread_mutex_lock(&LOCK_open);
-if (need_exit_cond= must_wait)
+if ((need_exit_cond= must_wait))
 {
 if (thd->global_read_lock) // This thread had the read locks
 {
@@ -805,7 +805,11 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commi
 }
 if (!abort_on_refresh && !result)
 protect_against_global_read_lock++;
-if (unlikely(need_exit_cond)) // global read locks are rare
+/*
+The following is only true in case of a global read locks (which is rare)
+and if old_message is set
+*/
+if (unlikely(need_exit_cond))
 thd->exit_cond(old_message);
 else
 pthread_mutex_unlock(&LOCK_open);


@@ -4562,6 +4562,10 @@ replicating a LOAD DATA INFILE command.",
 "The buffer that is allocated to cache index and rows for BDB tables.",
 (gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULONG,
 REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (long) ~0, 0, IO_SIZE, 0},
+/* QQ: The following should be removed soon! (bdb_max_lock preferred) */
+{"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
+(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
+REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
 {"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE,
 "The buffer that is allocated to cache index and rows for BDB tables.",
 (gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0,
@@ -4570,15 +4574,16 @@ replicating a LOAD DATA INFILE command.",
 "The maximum number of locks you can have active on a BDB table.",
 (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
 REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
-/* QQ: The following should be removed soon! */
-{"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
-(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
-REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
 #endif /* HAVE_BERKELEY_DB */
 {"binlog_cache_size", OPT_BINLOG_CACHE_SIZE,
 "The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.",
 (gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG,
 REQUIRED_ARG, 32*1024L, IO_SIZE, ~0L, 0, IO_SIZE, 0},
+{"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE,
+"Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!",
+(gptr*) &global_system_variables.bulk_insert_buff_size,
+(gptr*) &max_system_variables.bulk_insert_buff_size,
+0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ~0L, 0, 1, 0},
 {"connect_timeout", OPT_CONNECT_TIMEOUT,
 "The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'.",
 (gptr*) &connect_timeout, (gptr*) &connect_timeout,
@@ -4589,18 +4594,38 @@ replicating a LOAD DATA INFILE command.",
 (gptr*) &opt_crash_binlog_innodb, (gptr*) &opt_crash_binlog_innodb,
 0, GET_UINT, REQUIRED_ARG, 0, 0, ~(uint)0, 0, 1, 0},
 #endif
-{"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT,
-"How long a INSERT DELAYED thread should wait for INSERT statements before terminating.",
-(gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0,
-GET_ULONG, REQUIRED_ARG, DELAYED_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
+{ "date_format", OPT_DATE_FORMAT,
+"The DATE format (For future).",
+(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
+(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
+0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+{ "datetime_format", OPT_DATETIME_FORMAT,
+"The DATETIME/TIMESTAMP format (for future).",
+(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
+(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
+0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+{ "default_week_format", OPT_DEFAULT_WEEK_FORMAT,
+"The default week format used by WEEK() functions.",
+(gptr*) &global_system_variables.default_week_format,
+(gptr*) &max_system_variables.default_week_format,
+0, GET_ULONG, REQUIRED_ARG, 0, 0, 7L, 0, 1, 0},
 {"delayed_insert_limit", OPT_DELAYED_INSERT_LIMIT,
 "After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.",
 (gptr*) &delayed_insert_limit, (gptr*) &delayed_insert_limit, 0, GET_ULONG,
 REQUIRED_ARG, DELAYED_LIMIT, 1, ~0L, 0, 1, 0},
+{"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT,
+"How long a INSERT DELAYED thread should wait for INSERT statements before terminating.",
+(gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0,
+GET_ULONG, REQUIRED_ARG, DELAYED_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
 { "delayed_queue_size", OPT_DELAYED_QUEUE_SIZE,
 "What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.",
 (gptr*) &delayed_queue_size, (gptr*) &delayed_queue_size, 0, GET_ULONG,
 REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, ~0L, 0, 1, 0},
+{"expire_logs_days", OPT_EXPIRE_LOGS_DAYS,
+"Binary logs will be rotated after expire-log-days days ",
+(gptr*) &expire_logs_days,
+(gptr*) &expire_logs_days, 0, GET_ULONG,
+REQUIRED_ARG, 0, 0, 99, 0, 1, 0},
 { "flush_time", OPT_FLUSH_TIME,
 "A dedicated thread is created to flush all tables at the given interval.",
 (gptr*) &flush_time, (gptr*) &flush_time, 0, GET_ULONG, REQUIRED_ARG,
@@ -4609,14 +4634,14 @@ replicating a LOAD DATA INFILE command.",
 "List of operators for MATCH ... AGAINST ( ... IN BOOLEAN MODE)",
 0, 0, 0, GET_STR,
 REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-{ "ft_min_word_len", OPT_FT_MIN_WORD_LEN,
-"The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.",
-(gptr*) &ft_min_word_len, (gptr*) &ft_min_word_len, 0, GET_ULONG,
-REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0},
 { "ft_max_word_len", OPT_FT_MAX_WORD_LEN,
 "The maximum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.",
 (gptr*) &ft_max_word_len, (gptr*) &ft_max_word_len, 0, GET_ULONG,
 REQUIRED_ARG, HA_FT_MAXCHARLEN, 10, HA_FT_MAXCHARLEN, 0, 1, 0},
+{ "ft_min_word_len", OPT_FT_MIN_WORD_LEN,
+"The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.",
+(gptr*) &ft_min_word_len, (gptr*) &ft_min_word_len, 0, GET_ULONG,
+REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0},
 { "ft_query_expansion_limit", OPT_FT_QUERY_EXPANSION_LIMIT,
 "Number of best matches to use for query expansion",
 (gptr*) &ft_query_expansion_limit, (gptr*) &ft_query_expansion_limit, 0, GET_ULONG,
@@ -4631,48 +4656,52 @@ replicating a LOAD DATA INFILE command.",
 (gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG,
 REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0},
 #ifdef HAVE_INNOBASE_DB
-{"innodb_mirrored_log_groups", OPT_INNODB_MIRRORED_LOG_GROUPS,
-"Number of identical copies of log groups we keep for the database. Currently this should be set to 1.",
-(gptr*) &innobase_mirrored_log_groups,
-(gptr*) &innobase_mirrored_log_groups, 0, GET_LONG, REQUIRED_ARG, 1, 1, 10,
-0, 1, 0},
-{"innodb_log_files_in_group", OPT_INNODB_LOG_FILES_IN_GROUP,
-"Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.",
-(gptr*) &innobase_log_files_in_group, (gptr*) &innobase_log_files_in_group,
-0, GET_LONG, REQUIRED_ARG, 2, 2, 100, 0, 1, 0},
-{"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE,
-"Size of each log file in a log group in megabytes.",
-(gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0,
-GET_LONG, REQUIRED_ARG, 5*1024*1024L, 1*1024*1024L, ~0L, 0, 1024*1024L, 0},
-{"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE,
-"The size of the buffer which InnoDB uses to write log to the log files on disk.",
-(gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0,
-GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0},
-{"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE,
-"The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
-(gptr*) &innobase_buffer_pool_size, (gptr*) &innobase_buffer_pool_size, 0,
-GET_LONG, REQUIRED_ARG, 8*1024*1024L, 1024*1024L, ~0L, 0, 1024*1024L, 0},
-{"innodb_buffer_pool_awe_mem_mb", OPT_INNODB_BUFFER_POOL_AWE_MEM_MB,
-"If Windows AWE is used, the size of InnoDB buffer pool allocated from the AWE memory.",
-(gptr*) &innobase_buffer_pool_awe_mem_mb, (gptr*) &innobase_buffer_pool_awe_mem_mb, 0,
-GET_LONG, REQUIRED_ARG, 0, 0, 63000, 0, 1, 0},
 {"innodb_additional_mem_pool_size", OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE,
 "Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.",
 (gptr*) &innobase_additional_mem_pool_size,
 (gptr*) &innobase_additional_mem_pool_size, 0, GET_LONG, REQUIRED_ARG,
 1*1024*1024L, 512*1024L, ~0L, 0, 1024, 0},
+{"innodb_buffer_pool_awe_mem_mb", OPT_INNODB_BUFFER_POOL_AWE_MEM_MB,
+"If Windows AWE is used, the size of InnoDB buffer pool allocated from the AWE memory.",
+(gptr*) &innobase_buffer_pool_awe_mem_mb, (gptr*) &innobase_buffer_pool_awe_mem_mb, 0,
+GET_LONG, REQUIRED_ARG, 0, 0, 63000, 0, 1, 0},
+{"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE,
+"The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
+(gptr*) &innobase_buffer_pool_size, (gptr*) &innobase_buffer_pool_size, 0,
+GET_LONG, REQUIRED_ARG, 8*1024*1024L, 1024*1024L, ~0L, 0, 1024*1024L, 0},
 {"innodb_file_io_threads", OPT_INNODB_FILE_IO_THREADS,
 "Number of file I/O threads in InnoDB.", (gptr*) &innobase_file_io_threads,
 (gptr*) &innobase_file_io_threads, 0, GET_LONG, REQUIRED_ARG, 4, 4, 64, 0,
 1, 0},
-{"innodb_open_files", OPT_INNODB_OPEN_FILES,
-"How many files at the maximum InnoDB keeps open at the same time.",
-(gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0,
-GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0},
+{"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY,
+"Helps to save your data in case the disk image of the database becomes corrupt.",
+(gptr*) &innobase_force_recovery, (gptr*) &innobase_force_recovery, 0,
+GET_LONG, REQUIRED_ARG, 0, 0, 6, 0, 1, 0},
 {"innodb_lock_wait_timeout", OPT_INNODB_LOCK_WAIT_TIMEOUT,
 "Timeout in seconds an InnoDB transaction may wait for a lock before being rolled back.",
 (gptr*) &innobase_lock_wait_timeout, (gptr*) &innobase_lock_wait_timeout,
 0, GET_LONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0},
+{"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE,
+"The size of the buffer which InnoDB uses to write log to the log files on disk.",
+(gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0,
+GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0},
+{"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE,
+"Size of each log file in a log group in megabytes.",
+(gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0,
+GET_LONG, REQUIRED_ARG, 5*1024*1024L, 1*1024*1024L, ~0L, 0, 1024*1024L, 0},
+{"innodb_log_files_in_group", OPT_INNODB_LOG_FILES_IN_GROUP,
+"Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.",
+(gptr*) &innobase_log_files_in_group, (gptr*) &innobase_log_files_in_group,
+0, GET_LONG, REQUIRED_ARG, 2, 2, 100, 0, 1, 0},
+{"innodb_mirrored_log_groups", OPT_INNODB_MIRRORED_LOG_GROUPS,
+"Number of identical copies of log groups we keep for the database. Currently this should be set to 1.",
+(gptr*) &innobase_mirrored_log_groups,
+(gptr*) &innobase_mirrored_log_groups, 0, GET_LONG, REQUIRED_ARG, 1, 1, 10,
+0, 1, 0},
+{"innodb_open_files", OPT_INNODB_OPEN_FILES,
+"How many files at the maximum InnoDB keeps open at the same time.",
+(gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0,
+GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0},
 #ifdef HAVE_REPLICATION
 /*
 Disabled for the 4.1.3 release. Disabling just this paragraph of code is
@@ -4697,10 +4726,6 @@ replicating a LOAD DATA INFILE command.",
 "Helps in performance tuning in heavily concurrent environments.",
 (gptr*) &innobase_thread_concurrency, (gptr*) &innobase_thread_concurrency,
 0, GET_LONG, REQUIRED_ARG, 8, 1, 1000, 0, 1, 0},
-{"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY,
-"Helps to save your data in case the disk image of the database becomes corrupt.",
-(gptr*) &innobase_force_recovery, (gptr*) &innobase_force_recovery, 0,
-GET_LONG, REQUIRED_ARG, 0, 0, 6, 0, 1, 0},
 #endif /* HAVE_INNOBASE_DB */
 {"interactive_timeout", OPT_INTERACTIVE_TIMEOUT,
 "The number of seconds the server waits for activity on an interactive connection before closing it.",
@@ -4720,6 +4745,12 @@ replicating a LOAD DATA INFILE command.",
 0, (GET_ULL | GET_ASK_ADDR),
 REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD,
 IO_SIZE, 0},
+{"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD,
+"This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache",
+(gptr*) &dflt_key_cache_var.param_age_threshold,
+(gptr*) 0,
+0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG,
+300, 100, ~0L, 0, 100, 0},
 {"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE,
 "The default size of key cache blocks",
 (gptr*) &dflt_key_cache_var.param_block_size,
@@ -4732,12 +4763,6 @@ replicating a LOAD DATA INFILE command.",
 (gptr*) 0,
 0, (GET_ULONG | GET_ASK_ADDR) , REQUIRED_ARG, 100,
 1, 100, 0, 1, 0},
-{"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD,
-"This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache",
-(gptr*) &dflt_key_cache_var.param_age_threshold,
-(gptr*) 0,
-0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG,
-300, 100, ~0L, 0, 100, 0},
 {"long_query_time", OPT_LONG_QUERY_TIME,
 "Log all queries that have taken more than long_query_time seconds to execute to file.",
 (gptr*) &global_system_variables.long_query_time,
@@ -4768,14 +4793,14 @@ value. Will also apply to relay logs if max_relay_log_size is 0. \
 The minimum value for this variable is 4096.",
 (gptr*) &max_binlog_size, (gptr*) &max_binlog_size, 0, GET_ULONG,
 REQUIRED_ARG, 1024*1024L*1024L, IO_SIZE, 1024*1024L*1024L, 0, IO_SIZE, 0},
-{"max_connections", OPT_MAX_CONNECTIONS,
-"The number of simultaneous clients allowed.", (gptr*) &max_connections,
-(gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1,
-0},
 {"max_connect_errors", OPT_MAX_CONNECT_ERRORS,
 "If there is more than this number of interrupted connections from a host this host will be blocked from further connections.",
 (gptr*) &max_connect_errors, (gptr*) &max_connect_errors, 0, GET_ULONG,
 REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, ~0L, 0, 1, 0},
+{"max_connections", OPT_MAX_CONNECTIONS,
+"The number of simultaneous clients allowed.", (gptr*) &max_connections,
+(gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1,
+0},
 {"max_delayed_threads", OPT_MAX_DELAYED_THREADS,
 "Don't start more than this number of threads to handle INSERT DELAYED statements. If set to zero, which means INSERT DELAYED is not used.",
 (gptr*) &global_system_variables.max_insert_delayed_threads,
@@ -4828,11 +4853,6 @@ The minimum value for this variable is 4096.",
 "After this many write locks, allow some read locks to run in between.",
 (gptr*) &max_write_lock_count, (gptr*) &max_write_lock_count, 0, GET_ULONG,
 REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0},
-{"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE,
-"Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!",
-(gptr*) &global_system_variables.bulk_insert_buff_size,
-(gptr*) &max_system_variables.bulk_insert_buff_size,
-0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ~0L, 0, 1, 0},
 {"myisam_block_size", OPT_MYISAM_BLOCK_SIZE,
 "Block size to be used for MyISAM index pages.",
 (gptr*) &opt_myisam_block_size,
@@ -4871,16 +4891,16 @@ The minimum value for this variable is 4096.",
 (gptr*) &global_system_variables.net_buffer_length,
 (gptr*) &max_system_variables.net_buffer_length, 0, GET_ULONG,
 REQUIRED_ARG, 16384, 1024, 1024*1024L, 0, 1024, 0},
-{"net_retry_count", OPT_NET_RETRY_COUNT,
-"If a read on a communication port is interrupted, retry this many times before giving up.",
-(gptr*) &global_system_variables.net_retry_count,
-(gptr*) &max_system_variables.net_retry_count,0,
-GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1, 0},
 {"net_read_timeout", OPT_NET_READ_TIMEOUT,
 "Number of seconds to wait for more data from a connection before aborting the read.",
 (gptr*) &global_system_variables.net_read_timeout,
 (gptr*) &max_system_variables.net_read_timeout, 0, GET_ULONG,
 REQUIRED_ARG, NET_READ_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
+{"net_retry_count", OPT_NET_RETRY_COUNT,
+"If a read on a communication port is interrupted, retry this many times before giving up.",
+(gptr*) &global_system_variables.net_retry_count,
+(gptr*) &max_system_variables.net_retry_count,0,
+GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1, 0},
 {"net_write_timeout", OPT_NET_WRITE_TIMEOUT,
 "Number of seconds to wait for a block to be written to a connection before aborting the write.",
 (gptr*) &global_system_variables.net_write_timeout,
@@ -4932,11 +4952,21 @@ The minimum value for this variable is 4096.",
 (gptr*) &global_system_variables.query_prealloc_size,
 (gptr*) &max_system_variables.query_prealloc_size, 0, GET_ULONG,
 REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0},
+{"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE,
+"Allocation block size for storing ranges during optimization",
+(gptr*) &global_system_variables.range_alloc_block_size,
+(gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG,
+REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0},
 {"read_buffer_size", OPT_RECORD_BUFFER,
 "Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value.",
 (gptr*) &global_system_variables.read_buff_size,
 (gptr*) &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG,
 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ~0L, MALLOC_OVERHEAD, IO_SIZE, 0},
+{"read_only", OPT_READONLY,
+"Make all tables readonly, with the exception for replication (slave) threads and users with the SUPER privilege",
+(gptr*) &opt_readonly,
+(gptr*) &opt_readonly,
+0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
 {"read_rnd_buffer_size", OPT_RECORD_RND_BUFFER,
 "When reading rows in sorted order after a sort, the rows are read through this buffer to avoid a disk seeks. If not set, then it's set to the value of record_buffer.",
 (gptr*) &global_system_variables.read_rnd_buff_size,
@@ -4969,16 +4999,6 @@ The minimum value for this variable is 4096.",
 (gptr*) &slave_net_timeout, (gptr*) &slave_net_timeout, 0,
 GET_ULONG, REQUIRED_ARG, SLAVE_NET_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
 #endif /* HAVE_REPLICATION */
-{"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE,
-"Allocation block size for storing ranges during optimization",
-(gptr*) &global_system_variables.range_alloc_block_size,
-(gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG,
-REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0},
-{"read-only", OPT_READONLY,
-"Make all tables readonly, with the exception for replication (slave) threads and users with the SUPER privilege",
-(gptr*) &opt_readonly,
-(gptr*) &opt_readonly,
-0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
 {"slow_launch_time", OPT_SLOW_LAUNCH_TIME,
 "If creating the thread takes longer than this value (in seconds), the Slow_launch_threads counter will be incremented.",
 (gptr*) &slow_launch_time, (gptr*) &slow_launch_time, 0, GET_ULONG,
@@ -5008,23 +5028,28 @@ The minimum value for this variable is 4096.",
 "The number of open tables for all threads.", (gptr*) &table_cache_size,
 (gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L,
 0, 1, 0},
-{"thread_concurrency", OPT_THREAD_CONCURRENCY,
-"Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.",
-(gptr*) &concurrency, (gptr*) &concurrency, 0, GET_ULONG, REQUIRED_ARG,
-DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0},
 {"thread_cache_size", OPT_THREAD_CACHE_SIZE,
 "How many threads we should keep in a cache for reuse.",
 (gptr*) &thread_cache_size, (gptr*) &thread_cache_size, 0, GET_ULONG,
 REQUIRED_ARG, 0, 0, 16384, 0, 1, 0},
+{"thread_concurrency", OPT_THREAD_CONCURRENCY,
+"Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.",
+(gptr*) &concurrency, (gptr*) &concurrency, 0, GET_ULONG, REQUIRED_ARG,
+DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0},
+{"thread_stack", OPT_THREAD_STACK,
+"The stack size for each thread.", (gptr*) &thread_stack,
+(gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK,
+1024L*128L, ~0L, 0, 1024, 0},
+{ "time_format", OPT_TIME_FORMAT,
+"The TIME format (for future).",
+(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
+(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
+0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
 {"tmp_table_size", OPT_TMP_TABLE_SIZE,
 "If an in-memory temporary table exceeds this size, MySQL will automatically convert it to an on-disk MyISAM table.",
 (gptr*) &global_system_variables.tmp_table_size,
 (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULONG,
 REQUIRED_ARG, 32*1024*1024L, 1024, ~0L, 0, 1, 0},
-{"thread_stack", OPT_THREAD_STACK,
-"The stack size for each thread.", (gptr*) &thread_stack,
-(gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK,
-1024L*128L, ~0L, 0, 1024, 0},
 {"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE,
 "Allocation block size for transactions to be stored in binary log",
 (gptr*) &global_system_variables.trans_alloc_block_size,
@@ -5041,31 +5066,6 @@ The minimum value for this variable is 4096.",
 (gptr*) &max_system_variables.net_wait_timeout, 0, GET_ULONG,
 REQUIRED_ARG, NET_WAIT_TIMEOUT, 1, IF_WIN(INT_MAX32/1000, LONG_TIMEOUT),
 0, 1, 0},
-{"expire_logs_days", OPT_EXPIRE_LOGS_DAYS,
-"Binary logs will be rotated after expire-log-days days ",
-(gptr*) &expire_logs_days,
-(gptr*) &expire_logs_days, 0, GET_ULONG,
-REQUIRED_ARG, 0, 0, 99, 0, 1, 0},
-{ "default-week-format", OPT_DEFAULT_WEEK_FORMAT,
-"The default week format used by WEEK() functions.",
-(gptr*) &global_system_variables.default_week_format,
-(gptr*) &max_system_variables.default_week_format,
-0, GET_ULONG, REQUIRED_ARG, 0, 0, 7L, 0, 1, 0},
-{ "date-format", OPT_DATE_FORMAT,
-"The DATE format (For future).",
-(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
-(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
-0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-{ "datetime-format", OPT_DATETIME_FORMAT,
-"The DATETIME/TIMESTAMP format (for future).",
-(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
-(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
-0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-{ "time-format", OPT_TIME_FORMAT,
-"The TIME format (for future).",
-(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
-(gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
-0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
 {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };


@@ -155,11 +155,13 @@ bool foreign_key_prefix(Key *a, Key *b)
 ** Thread specific functions
 ****************************************************************************/
-THD::THD():user_time(0), current_arena(this), is_fatal_error(0),
-last_insert_id_used(0),
-insert_id_used(0), rand_used(0), time_zone_used(0),
-in_lock_tables(0), global_read_lock(0), bootstrap(0)
+THD::THD()
+:user_time(0), global_read_lock(0), is_fatal_error(0),
+last_insert_id_used(0),
+insert_id_used(0), rand_used(0), time_zone_used(0),
+in_lock_tables(0), bootstrap(0)
 {
+current_arena= this;
 host= user= priv_user= db= ip=0;
 host_or_ip= "connecting host";
 locked=some_tables_deleted=no_errors=password= 0;
@@ -439,10 +441,13 @@ void THD::awake(bool prepare_to_die)
 it is the true value but maybe current_mutex is not yet non-zero (we're
 in the middle of enter_cond() and there is a "memory order
 inversion"). So we test the mutex too to not lock 0.
 Note that there is a small chance we fail to kill. If victim has locked
-current_mutex, and hasn't entered enter_cond(), then we don't know it's
-going to wait on cond. Then victim goes into its cond "forever" (until
-we issue a second KILL). True we have set its thd->killed but it may not
+current_mutex, but hasn't yet entered enter_cond() (which means that
+current_cond and current_mutex are 0), then the victim will not get
+a signal and it may wait "forever" on the cond (until
+we issue a second KILL or the status it's waiting for happens).
+It's true that we have set its thd->killed but it may not
 see it immediately and so may have time to reach the cond_wait().
 */
 if (mysys_var->current_cond && mysys_var->current_mutex)
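
The reworded comment describes a classic wakeup race: the killer can only signal the victim's condition variable if the victim has already published it (current_cond/current_mutex non-zero); if the victim holds current_mutex but has not yet reached its enter_cond() equivalent, no signal is sent and only the killed flag remains, so the victim must re-check that flag around every wait. Below is a self-contained pthread sketch of that pattern; it assumes nothing about the real THD/enter_cond() API beyond what the comment states, and names such as Waiter, victim_thread and kill_victim are illustrative.

// kill_race.cc -- build with: g++ -std=c++11 -pthread kill_race.cc
#include <pthread.h>
#include <time.h>
#include <atomic>
#include <cstdio>

// Illustrative stand-in for the per-thread bookkeeping behind
// THD::enter_cond()/exit_cond(): the waiter publishes which cond/mutex it
// is about to sleep on so a killer knows what to signal.
struct Waiter
{
  pthread_mutex_t *current_mutex= nullptr;
  pthread_cond_t  *current_cond= nullptr;
  std::atomic<bool> killed{false};
};

static pthread_mutex_t work_mutex= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  work_cond= PTHREAD_COND_INITIALIZER;
static Waiter victim;

// Victim side: register, then wait -- always re-checking `killed`, because
// the killer may have set it in the window before registration (the "small
// chance we fail to kill" the comment talks about).
static void *victim_thread(void *)
{
  pthread_mutex_lock(&work_mutex);
  victim.current_mutex= &work_mutex;          // enter_cond() equivalent
  victim.current_cond= &work_cond;
  while (!victim.killed)
    pthread_cond_wait(&work_cond, &work_mutex);
  victim.current_mutex= nullptr;              // exit_cond() equivalent
  victim.current_cond= nullptr;
  pthread_mutex_unlock(&work_mutex);
  printf("victim: woke up, killed=%d\n", (int) victim.killed.load());
  return nullptr;
}

// Killer side: set the flag first, then signal only if a cond/mutex pair is
// registered -- the same best-effort test as the server's
// "if (mysys_var->current_cond && mysys_var->current_mutex)" line above.
static void kill_victim()
{
  victim.killed= true;
  if (victim.current_cond && victim.current_mutex)
  {
    pthread_mutex_lock(victim.current_mutex); // never signal a cond whose mutex we don't hold
    pthread_cond_broadcast(victim.current_cond);
    pthread_mutex_unlock(victim.current_mutex);
  }
  // else: the victim is not sleeping yet; it will notice `killed` itself
  // before it starts waiting (or a second KILL catches it later).
}

int main()
{
  pthread_t t;
  pthread_create(&t, nullptr, victim_thread, nullptr);
  struct timespec ts= {0, 100 * 1000 * 1000};  // crude 100ms head start
  nanosleep(&ts, nullptr);
  kill_victim();
  pthread_join(t, nullptr);
  return 0;
}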


@@ -8014,7 +8014,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array,
 Item *itemptr=*order->item;
 if (itemptr->type() == Item::INT_ITEM)
 { /* Order by position */
-uint count= itemptr->val_int();
+uint count= (uint) itemptr->val_int();
 if (!count || count > fields.elements)
 {
 my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),


@@ -3281,7 +3281,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
 ha_rows *deleted)
 {
 int error;
-Copy_field *copy,*copy_end, *next_field= 0;
+Copy_field *copy,*copy_end;
 ulong found_count,delete_count;
 THD *thd= current_thd;
 uint length;
@@ -3291,6 +3291,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
 List<Item> fields;
 List<Item> all_fields;
 ha_rows examined_rows;
+bool auto_increment_field_copied= 0;
 DBUG_ENTER("copy_data_between_tables");
 if (!(copy= new Copy_field[to->fields]))
@@ -3309,7 +3310,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
 if (def->field)
 {
 if (*ptr == to->next_number_field)
-next_field= copy_end;
+auto_increment_field_copied= TRUE;
 (copy_end++)->set(*ptr,def->field,0);
 }
@@ -3368,11 +3369,14 @@ copy_data_between_tables(TABLE *from,TABLE *to,
 }
 thd->row_count++;
 if (to->next_number_field)
-to->next_number_field->reset();
+{
+if (auto_increment_field_copied)
+to->auto_increment_field_not_null= TRUE;
+else
+to->next_number_field->reset();
+}
 for (Copy_field *copy_ptr=copy ; copy_ptr != copy_end ; copy_ptr++)
 {
-if (copy_ptr == next_field)
-to->auto_increment_field_not_null= TRUE;
 copy_ptr->do_copy(copy_ptr);
 }
 if ((error=to->file->write_row((byte*) to->record[0])))
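
The change above swaps a per-field pointer comparison inside the row loop for a flag computed once while the copy list is built: if the auto-increment column is among the fields copied from the old table, the copied value must be kept (auto_increment_field_not_null), otherwise the field is reset so the engine generates a fresh value. A condensed, self-contained C++ sketch of that control flow follows; Field, CopyPair and Table are simplified illustrative stand-ins, not the server's TABLE/Copy_field types.

#include <cstdio>
#include <vector>

struct Field    { int value= 0; };
struct CopyPair { Field *to; };            // stands in for Copy_field
struct Table
{
  Field *next_number_field= nullptr;       // the auto-increment column, if any
  bool auto_increment_field_not_null= false;
};

static void copy_rows(Table &to, const std::vector<CopyPair> &copy,
                      const std::vector<std::vector<int>> &rows)
{
  // Decided once, where the real code builds the Copy_field array:
  // is the auto-increment column one of the copied fields?
  bool auto_increment_field_copied= false;
  for (const CopyPair &c : copy)
    if (c.to == to.next_number_field)
      auto_increment_field_copied= true;

  for (const std::vector<int> &row : rows)
  {
    if (to.next_number_field)
    {
      if (auto_increment_field_copied)
        to.auto_increment_field_not_null= true;  // keep the copied value
      else
        to.next_number_field->value= 0;          // reset(): let a new value be generated
    }
    for (size_t i= 0; i < copy.size(); i++)
      copy[i].to->value= row[i];                 // do_copy()
    // ... write_row() would follow here ...
    printf("row written, auto_inc=%d, not_null=%d\n",
           to.next_number_field ? to.next_number_field->value : -1,
           (int) to.auto_increment_field_not_null);
  }
}

int main()
{
  Field id, payload;
  Table to;
  to.next_number_field= &id;

  // Both columns come from the old table, so the flag is set and the copied
  // id values survive instead of being regenerated.
  std::vector<CopyPair> copy= { {&id}, {&payload} };
  copy_rows(to, copy, { {10, 100}, {11, 101} });
  return 0;
}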